from datetime import datetime, timedelta
import pytest
from api.models.timetables import Timetable
from fastapi import status
from fastapi.testclient import TestClient
pytestmark = pytest.mark.asyncio
@pytest.fixture
def timetable2(timetable):
return Timetable(
id=1,
action="on",
start=datetime.now(tz=None),
duration=timedelta(minutes=3),
repeat=timedelta(weeks=1)
)
@pytest.fixture
def modified_timetable(timetable):
return Timetable(
id=11,
action="on",
start=datetime.now(tz=None),
duration=timedelta(minutes=6),
repeat=timedelta(weeks=7)
)
class TestRouteTimetable:
async def test_add_timetables(self, client: TestClient, timetable2: Timetable):
        # We send content=timetable2.json() because datetime is not JSON
        # serializable by the standard json encoder, but pydantic can serialize it.
        responseAdd = await client.post("/timetables", content=timetable2.json())
        assert responseAdd.status_code == status.HTTP_200_OK
        timetableAdd = Timetable(**responseAdd.json())
responseGet = await client.get(f"/timetables/{timetable2.id}")
timetableGet = Timetable(**responseGet.json())
assert timetableAdd == timetableGet
async def test_get_timetables_id(self, client: TestClient, timetable: Timetable):
response = await client.get(f"/timetables/{timetable.id}")
assert response.status_code == status.HTTP_200_OK
assert timetable == Timetable(**response.json())
response = await client.get("/timetables/666")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_edit_a_timetable(self, client: TestClient, timetable: Timetable, modified_timetable: Timetable):
response = await client.put(f"/timetables/{timetable.id}", content=modified_timetable.json())
assert modified_timetable == Timetable(**response.json())
assert timetable != Timetable(**response.json())
response = await client.get(f"/timetables/{response.json()['id']}")
assert modified_timetable == Timetable(**response.json())
response = await client.put("/timetables/10", content=modified_timetable.json())
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_delete_timetable(self, client: TestClient, timetable: Timetable):
response = await client.delete(f"/timetables/{timetable.id}")
assert response.status_code == status.HTTP_200_OK
response = await client.get(f"/timetables/{timetable.id}")
assert response.status_code == status.HTTP_404_NOT_FOUND
response = await client.delete(f"/timetables/{timetable.id}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_get_timetables(self, client: TestClient, timetable: Timetable):
response = await client.get("/timetables")
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) == 1
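# The tests above await the client's calls, so the `client` fixture (defined
# in conftest.py, not shown here) must be an asynchronous HTTP client rather
# than the synchronous fastapi TestClient the annotations suggest. A minimal
# sketch of such a fixture, assuming the app object lives at `api.main.app`
# (a hypothetical import path):
#
# import pytest_asyncio
# from httpx import ASGITransport, AsyncClient
# from api.main import app
#
# @pytest_asyncio.fixture
# async def client():
#     async with AsyncClient(transport=ASGITransport(app=app),
#                            base_url="http://test") as c:
#         yield c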
import enum
from typing import Optional
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
Enum,
Float,
ForeignKey,
ForeignKeyConstraint,
Index,
Integer,
String,
UnicodeText,
func
)
from sqlalchemy.orm import relationship
from .database import Base
UNKNOWN_OWNER = "Unknown"
HOUSING_DEVAL_FACTOR = 0.0042
class EventType(enum.Enum):
HOUSING_WARD_INFO = "HOUSING_WARD_INFO"
# LAND_UPDATE (house sold, reloed, autodemoed, etc)
# https://github.com/SapphireServer/Sapphire/blob/master/src/common/Network/PacketDef/Zone/ServerZoneDef.h#L1888
# https://github.com/SapphireServer/Sapphire/blob/master/src/world/Manager/HousingMgr.cpp#L365
# LAND_SET_INITIALIZE (sent on zonein)
# https://github.com/SapphireServer/Sapphire/blob/master/src/common/Network/PacketDef/Zone/ServerZoneDef.h#L1943
# https://github.com/SapphireServer/Sapphire/blob/master/src/world/Territory/HousingZone.cpp#L197
# LAND_SET_MAP (sent on zonein, after init, probably the useful one)
# https://github.com/SapphireServer/Sapphire/blob/master/src/common/Network/PacketDef/Zone/ServerZoneDef.h#L1929
# https://github.com/SapphireServer/Sapphire/blob/master/src/world/Territory/HousingZone.cpp#L154
# other packets:
# LAND_INFO_SIGN (view placard on owned house) - probably not useful, if we get this we already got a LAND_SET_MAP
# and if the ward changed since then, we got a LAND_UPDATE
# LAND_PRICE_UPDATE (view placard on unowned house) - similar to above, plus spammy if someone is buying a house
# ==== Table defs ====
class Sweeper(Base):
__tablename__ = "sweepers"
id = Column(BigInteger, primary_key=True)
name = Column(String)
world_id = Column(Integer, ForeignKey("worlds.id"))
last_seen = Column(DateTime, nullable=True, server_default=func.now(), onupdate=func.now())
world = relationship("World", back_populates="sweepers")
events = relationship("Event", back_populates="sweeper")
class World(Base):
__tablename__ = "worlds"
id = Column(Integer, primary_key=True)
name = Column(String, index=True)
sweepers = relationship("Sweeper", back_populates="world")
class District(Base):
__tablename__ = "districts"
id = Column(Integer, primary_key=True) # territoryTypeId
name = Column(String, unique=True)
land_set_id = Column(Integer, unique=True, index=True)
class PlotInfo(Base):
__tablename__ = "plotinfo"
territory_type_id = Column(Integer, ForeignKey("districts.id"), primary_key=True)
plot_number = Column(Integer, primary_key=True)
house_size = Column(Integer)
house_base_price = Column(Integer)
district = relationship("District", viewonly=True)
class PlotState(Base):
__tablename__ = "plot_states"
__table_args__ = (
ForeignKeyConstraint(
("territory_type_id", "plot_number"),
("plotinfo.territory_type_id", "plotinfo.plot_number")
),
)
id = Column(Integer, primary_key=True)
world_id = Column(Integer, ForeignKey("worlds.id"))
territory_type_id = Column(Integer, ForeignKey("districts.id"))
ward_number = Column(Integer)
plot_number = Column(Integer)
last_seen = Column(Float) # UNIX seconds
first_seen = Column(Float)
is_owned = Column(Boolean)
last_seen_price = Column(Integer, nullable=True) # null for unknown price
owner_name = Column(String, nullable=True) # "Unknown" for unknown owner (UNKNOWN_OWNER), used to build relo graph
is_fcfs = Column(Boolean)
lotto_entries = Column(Integer, nullable=True) # null if the plot is FCFS
world = relationship("World", viewonly=True)
district = relationship("District", viewonly=True)
plot_info = relationship("PlotInfo", viewonly=True)
@property
def num_devals(self) -> Optional[int]: # todo(6.1): delete me
"""
Returns the number of price this house has devalued. If the price is unknown, returns None.
If price>max, returns 0.
"""
if self.last_seen_price is None:
return None
max_price = self.plot_info.house_base_price
if self.last_seen_price >= max_price:
return 0
return round((max_price - self.last_seen_price) / (HOUSING_DEVAL_FACTOR * max_price))
# common query indices
Index(
"ix_plot_states_loc_last_seen_desc",
# these 4 make up the plot state's unique location
PlotState.world_id,
PlotState.territory_type_id,
PlotState.ward_number,
PlotState.plot_number,
# and this is for convenience
PlotState.last_seen.desc()
)
Index("ix_plot_states_last_seen_desc", PlotState.last_seen.desc())
# store of all ingested events for later analysis (e.g. FC/player ownership, relocation/resell graphs, etc)
class Event(Base):
__tablename__ = "events"
id = Column(Integer, primary_key=True)
sweeper_id = Column(BigInteger, ForeignKey("sweepers.id", ondelete="SET NULL"), nullable=True, index=True)
timestamp = Column(Float, index=True)
event_type = Column(Enum(EventType), index=True)
data = Column(UnicodeText)
sweeper = relationship("Sweeper", back_populates="events")
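# Example (a sketch, not part of the original schema): the composite index
# ix_plot_states_loc_last_seen_desc above is built for "latest state of a
# given plot" lookups. With an ordinary Session from .database, such a
# query could look like:
from sqlalchemy.orm import Session

def latest_plot_state(session: Session, world_id: int, territory_type_id: int,
                      ward_number: int, plot_number: int) -> Optional[PlotState]:
    """Return the most recently seen PlotState for one plot, or None."""
    return (
        session.query(PlotState)
        .filter_by(world_id=world_id, territory_type_id=territory_type_id,
                   ward_number=ward_number, plot_number=plot_number)
        .order_by(PlotState.last_seen.desc())
        .first()
    )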
#!/usr/local/bin/python3
import os
import re
import sys
import argparse
import plistlib
import json
def modifyPbxproj():
    # Note: relies on the module-level `filePath` parsed in __main__.
    data = ''
flag = False
end = False
with open(filePath, 'r') as file:
for line in file.readlines():
if not end:
find = line.find('3B02599D20F49A43001F9C82 /* Debug */')
if find != -1:
flag = True
if flag and re.search('PRODUCT_BUNDLE_IDENTIFIER', line):
line = line.replace('quanbin.jin-test.sharkORMDemo', 'quanbin.jin-test.Demo')
end = True
data += line
with open(filePath, 'w') as file:
file.writelines(data)
# modify display name, version and build in info.plist file
def modifyInfoPlist (displayName, version, build):
plistPath = os.path.join(filePath, 'Butler/ButlerForRemain/ButlerForRemain-Info.plist')
with open(plistPath, 'rb') as fp:
plist = plistlib.load(fp)
plist['CFBundleVersion'] = build
plist['CFBundleDisplayName'] = displayName
plist['CFBundleShortVersionString'] = version
with open(plistPath, 'wb') as fp:
plistlib.dump(plist, fp)
# Parse the JSON file and verify that the required keys are present
def jsonParser(filePath):
with open(filePath) as fp:
jsonObj = json.load(fp)
try:
jsonObj["requestURL"]
jsonObj["version"]
jsonObj["build"]
jsonObj["displayName"]
    except KeyError as undefinedKey:
        print(str(undefinedKey) + ' is missing')
        exit(1)
return jsonObj
def setRequestBaseURL(baseURL):
    # TODO: not implemented yet; should rewrite the request base URL in the sources.
    pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('filePath', help='project root directory')
filePath = parser.parse_args().filePath
# modifyInfoPlist('物管APP', '1.9.2_A1', '2')
config = jsonParser('/Users/remain/Desktop/pythonTest/jsonFile')
exit(0)
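# For reference, jsonParser() above expects a configuration file shaped like
# this (values are purely illustrative):
#
# {
#     "requestURL": "https://example.com/api",
#     "version": "1.9.2",
#     "build": "2",
#     "displayName": "Demo"
# }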
# coding: utf-8
# # Dogs-vs-cats classification with ViT
#
# In this notebook, we'll finetune a [Vision Transformer]
# (https://arxiv.org/abs/2010.11929) (ViT) to classify images of dogs
# from images of cats using TensorFlow 2 / Keras and HuggingFace's
# [Transformers](https://github.com/huggingface/transformers).
#
# **Note that using a GPU with this notebook is highly recommended.**
#
# First, the needed imports.
from transformers import __version__ as transformers_version
from transformers.utils import check_min_version
check_min_version("4.13.0.dev0")
from transformers import ViTFeatureExtractor, TFViTForImageClassification
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import TensorBoard
from PIL import Image
import os, sys, datetime
import pathlib
import numpy as np
print('Using TensorFlow version:', tf.__version__,
'Keras version:', tf.keras.__version__,
'Transformers version:', transformers_version)
# ## Data
if 'DATADIR' in os.environ:
DATADIR = os.environ['DATADIR']
else:
DATADIR = "/scratch/project_2005299/data/"
print('Using DATADIR', DATADIR)
datapath = os.path.join(DATADIR, "dogs-vs-cats/train-2000/")
assert os.path.exists(datapath), "Data not found at "+datapath
# The training dataset consists of 2000 images of dogs and cats, split
# in half. In addition, the validation and test sets consist of
# 1000 and 22000 images, respectively.
nimages = {'train':2000, 'validation':1000, 'test':22000}
# ### Image paths and labels
def get_paths(dataset):
data_root = pathlib.Path(datapath+dataset)
image_paths = list(data_root.glob('*/*'))
image_paths = [str(path) for path in image_paths]
image_count = len(image_paths)
assert image_count == nimages[dataset], \
"Found {} images, expected {}".format(image_count, nimages[dataset])
return image_paths
image_paths = dict()
image_paths['train'] = get_paths('train')
image_paths['validation'] = get_paths('validation')
image_paths['test'] = get_paths('test')
label_names = sorted(item.name for item in
pathlib.Path(datapath+'train').glob('*/')
if item.is_dir())
label_to_index = dict((name, index) for index,name in enumerate(label_names))
def get_labels(dataset):
return [label_to_index[pathlib.Path(path).parent.name]
for path in image_paths[dataset]]
image_labels = dict()
image_labels['train'] = get_labels('train')
image_labels['validation'] = get_labels('validation')
image_labels['test'] = get_labels('test')
# ### Data loading
#
# First we specify the pre-trained ViT model we are going to use. The
# model ["google/vit-base-patch16-224"]
# (https://huggingface.co/google/vit-base-patch16-224) is pre-trained
# on ImageNet-21k (14 million images, 21,843 classes) at resolution
# 224x224, and fine-tuned on ImageNet 2012 (1 million images, 1,000
# classes) at resolution 224x224.
#
# We'll use a pre-trained ViT feature extractor that matches the ViT
# model to preprocess the input images.
VITMODEL = 'google/vit-base-patch16-224'
feature_extractor = ViTFeatureExtractor.from_pretrained(VITMODEL)
# Next we define functions to load and preprocess the images:
def _load_and_process_image(path, label):
img = Image.open(path.numpy()).convert("RGB")
proc_img = feature_extractor(images=img,
return_tensors="np")['pixel_values']
return np.squeeze(proc_img), label
def load_and_process_image(path, label):
image, label = tf.py_function(_load_and_process_image,
(path, label), (tf.float32, tf.int32))
image.set_shape([None, None, None])
label.set_shape([])
return image, label
# ### TF Datasets
#
# Let's now define our TF Datasets for training and validation data.
BATCH_SIZE = 32
dataset_train = tf.data.Dataset.from_tensor_slices((image_paths['train'],
image_labels['train']))
dataset_train = dataset_train.map(load_and_process_image,
num_parallel_calls=tf.data.AUTOTUNE)
dataset_train = dataset_train.shuffle(len(dataset_train)).batch(
BATCH_SIZE, drop_remainder=True)
dataset_validation = tf.data.Dataset.from_tensor_slices(
(image_paths['validation'], image_labels['validation']))
dataset_validation = dataset_validation.map(load_and_process_image,
num_parallel_calls=tf.data.AUTOTUNE)
dataset_validation = dataset_validation.batch(BATCH_SIZE, drop_remainder=True)
# ## Model
#
# ### Initialization
model = TFViTForImageClassification.from_pretrained(
VITMODEL, num_labels=1, ignore_mismatched_sizes=True)
LR = 1e-5
optimizer = tf.keras.optimizers.Adam(learning_rate=LR)
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)  # the ViT head outputs raw logits
metric = 'accuracy'
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
model.summary()
# ### Learning
logdir = os.path.join(
os.getcwd(), "logs",
"dvc-vit-"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print('TensorBoard log directory:', logdir)
os.makedirs(logdir)
callbacks = [TensorBoard(log_dir=logdir)]
EPOCHS = 4
history = model.fit(dataset_train, validation_data=dataset_validation,
epochs=EPOCHS, verbose=2, callbacks=callbacks)
# ### Inference
#
# We now evaluate the model using the test set. First we'll define the
# TF Dataset for the test images.
dataset_test = tf.data.Dataset.from_tensor_slices((image_paths['test'],
image_labels['test']))
dataset_test = dataset_test.map(load_and_process_image,
num_parallel_calls=tf.data.AUTOTUNE)
dataset_test = dataset_test.batch(BATCH_SIZE, drop_remainder=False)
scores = model.evaluate(dataset_test, verbose=2)
print("Test set %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
#################################################################################
# Autor: Richard Alexander Cordova Herrera
# TRABAJO FIN DE MASTER
# CURSO 2019-2020
# MASTER EN INTERNET DE LAS COSAS
# FACULTAD DE INFORMATICA
# UNIVERSIDAD COMPLUTENSE DE MADRID
#################################################################################
#################################################################################
# Import the libraries the application needs
import csv
import pymongo
from datetime import datetime
from datetime import timedelta
#################################################################################
#################################################################################
# Start - Function definitions
#################################################################################
# Function dataGraph: builds the dictionaries used for plotting
def dataGraph(dateStart, dateEnd, dataAcc, dataGyr):
dataAccAux = []
dataGyrAux = []
dataAccAux.append({'time' : dateStart,
'sensorAccX' : dataAcc[0],
'sensorAccY' : dataAcc[1],
'sensorAccZ' : dataAcc[2]})
dataAccAux.append({'time' : dateEnd,
'sensorAccX' : dataAcc[0],
'sensorAccY' : dataAcc[1],
'sensorAccZ' : dataAcc[2]})
dataGyrAux.append({'time' : dateStart,
'sensorGyrX' : dataGyr[0],
'sensorGyrY' : dataGyr[1],
'sensorGyrZ' : dataGyr[2]})
dataGyrAux.append({'time' : dateEnd,
'sensorGyrX' : dataGyr[0],
'sensorGyrY' : dataGyr[1],
'sensorGyrZ' : dataGyr[2]})
return dataAccAux, dataGyrAux
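# For example (illustrative values): dataGraph(0, 1000, [0.1, 0.2, 9.8],
# [0.0, 0.0, 0.0]) returns two 2-point series, one per sensor, holding the
# same reading at the range's start and end timestamps, i.e. a flat line
# used when only a single sample is available for the requested range.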
#################################################################################
#################################################################################
# Main program
# Base path for the code's files
globalPath = "/home/tfm-iot/Documentos/TFM/Ejecutables/"
# Flag marking the start of the application
print("Script start: " + str(datetime.now()))
#################################################################################
#################################################################################
# Open the configuration file holding the search parameters
archivo = open(globalPath + "TFMMIoTIgnDataSearch.txt", 'r')
dateParameters = archivo.read()
archivo.close()
if len(dateParameters) == 0 :
dateStart = 1577461660762
dateEnd = 1577461668910
cmd = 0
else:
dateStart = int(dateParameters[0 : dateParameters.find(" ")])
dateParametersAux = dateParameters[dateParameters.find(" ") + 1 :]
dateEnd = int(dateParametersAux[0 : dateParametersAux.find(" ")])
dateParametersAux = dateParametersAux[dateParametersAux.find(" ") + 1 :]
cmd = int(dateParametersAux[0 : dateParametersAux.find(" ")])
cmdSearch = "$eq"
if cmd == 0:
cmdSearch = "$gte"
#################################################################################
#################################################################################
# MongoDB connection settings
serverIp = "cripta.fdi.ucm.es"  # local alternative: "192.168.1.52"
serverPort = "27118"  # local alternative: "27017"
database = "TFMMIoT"
collection = "Dobot"
serverAddress = "mongodb://" + serverIp + ":" + serverPort + "/"
myclient = pymongo.MongoClient(serverAddress)
mydb = myclient[database]
mycol = mydb[collection]
#################################################################################
#################################################################################
# Query MongoDB for the data within the requested range
queryData = mycol.find({"sensor": "movimiento",
"movCode" : {cmdSearch: cmd},
"time" : { "$gt" : dateStart,
"$lt" : dateEnd }},{ "_id" : 0,
"time" : 1,
"sensorAccX" : 1,
"sensorAccZ" : 1,
"sensorAccY" : 1,
"sensorGyrX" : 1,
"sensorGyrZ" : 1,
"sensorGyrY" : 1}).sort("time", pymongo.ASCENDING)
data = list(queryData)
#################################################################################
#################################################################################
# Case 1: no data exists in the selected date range.
# Action: look up the last recorded sample
if (len(data) == 0 ) :
queryData = mycol.find({"sensor": "movimiento",
"time" : { "$lt" : dateEnd }},{ "_id" : 0,
"time" : 1,
"sensorAccX" : 1,
"sensorAccY" : 1,
"sensorAccZ" : 1,
"sensorGyrX" : 1,
"sensorGyrY" : 1,
"sensorGyrZ" : 1}).sort("time", pymongo.DESCENDING).limit(1)
data = list(queryData)
##############################################################
# Case 1.1: no records stored at all.
# Action: plot with values at 0
if len(data) == 0 :
dataAccAux = [0, 0, 0]
dataGyrAux = [0, 0, 0]
dataAcc, dataGyr = dataGraph(dateStart, dateEnd, dataAccAux, dataGyrAux)
##############################################################
##############################################################
# Case 1.2: there are stored records.
# Action: take the latest value and build the
# structure used to plot the data
else :
dataAccAux = [data[0]["sensorAccX"], data[0]["sensorAccY"], data[0]["sensorAccZ"]]
dataGyrAux = [data[0]["sensorGyrX"], data[0]["sensorGyrY"], data[0]["sensorGyrZ"]]
dataAcc, dataGyr = dataGraph(dateStart, dateEnd, dataAccAux, dataGyrAux)
##############################################################
#################################################################################
#################################################################################
# Case 2: data exists in the selected date range.
# Action: process the data and build the structure
# used to plot the data
else :
dataSize = 6000
if len(data) < dataSize :
dataSize = len(data)
dataToSkip = int(len(data) / dataSize)
mycol.create_index('time')
dataAcc = []
dataGyr = []
for i in range(dataSize) :
dataAcc.append({'time' : data[i*dataToSkip]['time'],
'sensorAccX' : data[i*dataToSkip]['sensorAccX'],
'sensorAccY' : data[i*dataToSkip]['sensorAccY'],
'sensorAccZ' : data[i*dataToSkip]['sensorAccZ']})
dataGyr.append({'time' : data[i*dataToSkip]['time'],
'sensorGyrX' : data[i*dataToSkip]['sensorGyrX'],
'sensorGyrY' : data[i*dataToSkip]['sensorGyrY'],
'sensorGyrZ' : data[i*dataToSkip]['sensorGyrZ']})
#################################################################################
#################################################################################
# Update the dataAcc.txt and dataGyr.txt files used for plotting
# in Ignition
fileName = "TFMMIoTIgnDataAcc.txt"
file = open(globalPath + fileName, "w")
file.write(str(dataAcc))
file.close()
fileName = "TFMMIoTIgnDataGyr.txt"
file = open(globalPath + fileName, "w")
file.write(str(dataGyr))
file.close()
fileName = "TFMMIoTIgnDataSearch.txt"
file = open(globalPath + fileName, "w")
file.write(str(dateStart) + " " + str(dateEnd) + " " + str(cmd) + " Fin")
file.close()
#################################################################################
#################################################################################
# Flag marking the end of the script; print summary information
print("Query data size: " + str(len(data)))
print("Downsampled data size: " + str(len(dataAcc)))
print("Script end: " + str(datetime.now()))
print("Query details")
print("Start date: " + str(dateStart) + " End date: " + str(dateEnd))
#################################################################################
from django.apps import AppConfig
class TambahVaksinConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'tambah_vaksin'
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
# Fitting simple linear regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set result
y_pred = regressor.predict(X_test)
# Visualising the Training set result
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Training Set)')
plt.xlabel('Experience (Years)')
plt.ylabel('Salary ($)')
plt.show()
# Visualising the Test set result
plt.scatter(X_test, y_test, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Test Set)')
plt.xlabel('Experience (Years)')
plt.ylabel('Salary ($)')
plt.show()
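# A quick numeric summary to complement the plots (a sketch; sklearn's
# score() returns the coefficient of determination R^2 on the given split):
print('Intercept:', regressor.intercept_)
print('Slope:', regressor.coef_[0])
print('Test R^2:', regressor.score(X_test, y_test))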
"""Utilities relative to hunspell itself."""
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 19:39:10 2020
@author: esol
"""
from neqsim.thermo import (fluid, addOilFractions, printFrame, dataFrame, fluidcreator,
                           createfluid, createfluid2, TPflash, phaseenvelope, fluidComposition)
from neqsim.process import (pump, clearProcess, stream, valve, separator, compressor,
                            runProcess, viewProcess, heater, mixer, recycle)
# Start by creating a fluid in neqsim using a predefined fluid (dry gas, rich gas, light oil, black oil).
# Set temperature and pressure, do a TPflash and show the results in a dataframe.
feedPressure = 50.0
feedTemperature = 30.0
fluid1 = fluid("cpa") # create a fluid using the SRK-EoS
fluid1.addComponent("CO2",1e-10)
fluid1.addComponent("methane",1e-10)
fluid1.addComponent("ethane",1e-10)
fluid1.addComponent("propane",1e-10)
fluid1.addComponent("water",1e-10)
fluid1.addComponent("TEG",1e-10)
fluid1.setMixingRule(10)
fluid1.setMultiPhaseCheck(True)
fluidcomposition = [0.031, 0.9297, 0.0258, 0.0135, 6.48413454028242e-002,
1.0e-15]
fluidComposition(fluid1, fluidcomposition)
fluid1.setTemperature(feedTemperature, "C")
fluid1.setPressure(feedPressure, "bara")
fluid1.setTotalFlowRate(5.0, "MSm3/day")
fluid2= fluid("cpa")
fluid2.addComponent("CO2", 1.0e-10)
fluid2.addComponent("methane", 1.0e-10)
fluid2.addComponent("ethane", 1.0e-10)
fluid2.addComponent("propane", 1.0e-10)
fluid2.addComponent("water", 1.0, 'kg/sec')
fluid2.addComponent("TEG", 99.0, 'kg/sec')
fluid2.setMixingRule(10)
fluid2.setMultiPhaseCheck(True)
fluid2.setTemperature(313.15, "K")
fluid2.setPressure(75.0, "bara")
fluid2.setTotalFlowRate(10625.0, 'kg/hr')
# demonstration of setting up a simple process calculation
clearProcess()
stream1 = stream(fluid1)
glycolstream = stream(fluid2)
separator1 = separator(stream1, "inlet separator")
compressor1 = compressor(separator1.getGasOutStream(), 75.0)
heater1 = heater(compressor1.getOutStream())
heater1.setOutTemperature(313.0)
mixer1 = mixer()
mixer1.addStream(heater1.getOutStream())
mixer1.addStream(glycolstream)
scrubberLP = separator(mixer1.getOutStream())
valve1 = valve(scrubberLP.getLiquidOutStream(), 10.0, "Glycol valve")
flashDrum = separator(valve1.getOutStream())
heater1 = heater(flashDrum.getLiquidOutStream())
heater1.setOutTemperature(273.15+195.0)
stripper = separator(heater1.getOutStream())
cooler1 = heater(stripper.getLiquidOutStream())
cooler1.setOutTemperature(313.0)
pump1 = pump(cooler1.getOutStream(), 75.0)
# run the process a few times so that the recycle loops converge
runProcess()
print("1")
runProcess()
print("2")
runProcess()
print("3")
# Copyright 2018 Lawrence Kesteloot
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import sys
import hashlib
import stat
import os
# Using a hash is much slower and doesn't handle duplicate files well. Leaving this here
# because we may want to later add a way to detect duplicate files.
USE_HASH = False
# Take a filename and escape spaces. Doesn't handle all shell special characters (quotes, etc.).
def shell_friendly(filename):
return filename.replace(" ", "\\ ")
# Return a unique identifier for this file, as a constant-width string.
def get_file_identifier(pathname):
    if USE_HASH:
        contents = open(pathname, "rb").read()
        identifier = hashlib.sha1(contents).hexdigest()
else:
# Use inode number.
s = os.stat(pathname)
identifier = "%-15d" % s[stat.ST_INO]
return identifier
# Generate the data file.
def generate_file():
for filename in glob.glob("*"):
        print(get_file_identifier(filename) + " " + filename)
# Read the data file and rename the files.
def rename_files(data_file):
# Read data file.
id_to_new_filename = {}
for line in open(data_file):
line = line.strip()
# Break at the first space.
space = line.find(" ")
if space == -1:
sys.stderr.write("WARNING: This line has no filename: " + line)
else:
file_id = line[:space]
filename = line[space + 1:].strip()
id_to_new_filename[file_id] = filename
# Read file identifiers from disk.
id_to_old_filename = {}
for filename in glob.glob("*"):
id_to_old_filename[get_file_identifier(filename).strip()] = filename
# Generate the script.
for file_id, old_filename in id_to_old_filename.items():
new_filename = id_to_new_filename.get(file_id)
if not new_filename:
sys.stderr.write("Identifier " + file_id + " not found in data file: " + old_filename + "\n")
else:
del id_to_new_filename[file_id]
if new_filename != old_filename:
print "mv " + shell_friendly(old_filename) + " " + shell_friendly(new_filename)
# See if any lines in the file were unused.
for file_id, new_filename in id_to_new_filename.items():
sys.stderr.write("Filename not used in data file: " + new_filename + "\n")
def main():
if len(sys.argv) == 1:
generate_file()
elif len(sys.argv) == 2:
rename_files(sys.argv[1])
else:
sys.stderr.write("usage: RENAME.py [filename]\n")
if __name__ == "__main__":
main()
def check():
import numpy as np
dataOK = np.loadtxt('nusselt_ref.out')
    dataChk = np.loadtxt('data/post/wall/nusselt.out')
tol = 1e-6
nts = 10000
    # compare the mean over the last nts samples; abs() makes the check symmetric
    chk = abs(np.mean(dataOK[-nts:, 2]) - np.mean(dataChk[-nts:, 2])) < tol
return chk
def test_answer():
assert check()
import pytt
assert pytt.name == "pytt"
from discord.ext import commands
import discord
import cogs
import random
import asyncio
import requests
from discord import File
import os
from datetime import datetime
import traceback
import tabula
import json
bot = commands.Bot(command_prefix='$')
class VipCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
    async def chujwdupekuczkowiexe(self, ctx):
        try:
            with open("planlekcji.json", "r") as f:
                pl = json.load(f)
            # map each weekday to the day shown in the embed's extra field
            next_day = {
                "Monday": "Tuesday",
                "Tuesday": "Wednesday",
                "Wednesday": "Thursday",
                "Thursday": "Friday",
                "Friday": "Monday",
            }
            dzien = datetime.today().strftime('%A')
            if dzien in next_day:
                embed = discord.Embed(title=f"Timetable for {dzien}",
                                      description=str(pl[dzien]), color=0xE657EE)
                embed.add_field(value=str(pl[next_day[dzien]]),
                                name=next_day[dzien], inline=False)
                await ctx.send(embed=embed)
        except Exception:
            await ctx.send(traceback.format_exc())
@commands.command()
async def chujciwdupkekurwo(self, ctx, *, arg):
try:
await ctx.send(arg, tts=True)
        except Exception:
            await ctx.send(f"```python\n{traceback.format_exc()}```")
def setup(bot):
bot.add_cog(VipCog(bot))
print('Vip Gotowe')
from collections import OrderedDict
from copy import deepcopy
from functools import partial
from ml_collections import ConfigDict
import numpy as np
import jax
import jax.numpy as jnp
import flax
import flax.linen as nn
from flax.training.train_state import TrainState
import optax
import distrax
from .jax_utils import next_rng, value_and_multi_grad, mse_loss
from .model import Scalar, update_target_network
from .utils import prefix_metrics
class BC(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.policy_lr = 3e-4
config.optimizer_type = 'adam'
config.alpha_multiplier = 0.0
config.use_automatic_entropy_tuning = True
config.target_entropy = 0.0
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, policy):
self.config = self.get_default_config(config)
self.policy = policy
self.observation_dim = policy.observation_dim
self._train_states = {}
optimizer_class = {
'adam': optax.adam,
'sgd': optax.sgd,
}[self.config.optimizer_type]
policy_params = self.policy.init(next_rng(), next_rng(), jnp.zeros((10, self.observation_dim)))
self._train_states['policy'] = TrainState.create(
params=policy_params,
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys = ['policy']
if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
self._train_states['log_alpha'] = TrainState.create(
params=self.log_alpha.init(next_rng()),
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys.append('log_alpha')
self._model_keys = tuple(model_keys)
self._total_steps = 0
def train(self, batch):
self._total_steps += 1
self._train_states, metrics = self._train_step(
self._train_states, next_rng(), batch
)
return metrics
@partial(jax.jit, static_argnames=('self'))
def _train_step(self, train_states, rng, batch):
def loss_fn(train_params, rng):
observations = batch['observations']
actions = batch['actions']
batch_size, _ = jnp.shape(observations)
loss_collection = {}
rng, split_rng = jax.random.split(rng)
new_actions, log_pi = self.policy.apply(train_params['policy'], split_rng, observations)
if self.config.use_automatic_entropy_tuning:
alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()
loss_collection['log_alpha'] = alpha_loss
alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier
else:
alpha_loss = 0.0
alpha = self.config.alpha_multiplier
""" Policy loss """
rng, split_rng = jax.random.split(rng)
log_probs = self.policy.apply(train_params['policy'], observations, actions, method=self.policy.log_prob)
policy_loss = (alpha*log_pi - log_probs).mean()
loss_collection['policy'] = policy_loss
negative_log_probs = -log_probs.mean()
return tuple(loss_collection[key] for key in self.model_keys), locals()
train_params = {key: train_states[key].params for key in self.model_keys}
(_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)
new_train_states = {
key: train_states[key].apply_gradients(grads=grads[i][key])
for i, key in enumerate(self.model_keys)
}
metrics = dict(
policy_loss=aux_values['policy_loss'],
negative_log_probs=aux_values['negative_log_probs'],
alpha_loss=aux_values['alpha_loss'],
alpha=aux_values['alpha'],
)
return new_train_states, metrics
def log_likelihood(self, observations, actions):
actions = jnp.clip(actions, -1 + 1e-5, 1 - 1e-5)
log_prob = self.policy.apply(self.train_params['policy'], observations, actions, method=self.policy.log_prob)
return log_prob.mean()
@property
def model_keys(self):
return self._model_keys
@property
def train_states(self):
return self._train_states
@property
def train_params(self):
return {key: self.train_states[key].params for key in self.model_keys}
@property
def total_steps(self):
return self._total_steps
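# Minimal usage sketch (kept as comments because the policy class lives
# elsewhere in this codebase; any policy exposing observation_dim, init(),
# and the apply()/log_prob() interface used above would work, and the class
# name below is an assumption):
#
# policy = TanhGaussianPolicy(observation_dim=17, action_dim=6)
# bc = BC(BC.get_default_config(), policy)
# batch = {'observations': obs_batch, 'actions': act_batch}
# metrics = bc.train(batch)
# print(prefix_metrics(metrics, 'bc'))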
import sys
import os
from inspect import getmembers
from types import BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType
import zipfile
from util import isIronPython, isJython, getPlatform
cur_path = os.path.abspath(os.path.dirname(__file__))
distPaths = [os.path.join(cur_path, '../../../indigo/dist'), os.path.join(cur_path, '../../dist/')]
success = False
if isIronPython():
raise RuntimeError("Indigo coverage is not supported in .NET")
elif isJython():
raise RuntimeError("Indigo coverage is not supported in Java")
else:
dll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/python"))
rdll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/renderer/python"))
idll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/inchi/python"))
bdll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/bingo/python"))
if not os.path.exists(os.path.join(dll_full_path, 'lib')):
for distPath in distPaths:
if not os.path.exists(distPath):
continue
dll_full_path = '%s/python' % (distPath)
for item in os.listdir(distPath):
if item.startswith('indigo-python-') and item.endswith('.zip') and (item.find(getPlatform()) != -1 or item.find('universal') != -1):
curdir = os.path.abspath(os.curdir)
os.chdir(distPath)
if 'INDIGO_TEST_MODE' not in os.environ:
with zipfile.ZipFile(item) as zf:
zf.extractall()
os.environ['INDIGO_TEST_MODE'] = '1'
os.chdir(curdir)
dll_full_path = os.path.abspath(os.path.join(cur_path, distPath, item.replace('.zip', '')))
break
if not os.path.exists(dll_full_path):
continue
break
sys.path.insert(0, dll_full_path)
sys.path.insert(0, rdll_full_path)
sys.path.insert(0, idll_full_path)
sys.path.insert(0, bdll_full_path)
from indigo import Indigo, IndigoObject, IndigoException
from indigo_renderer import IndigoRenderer
from indigo_inchi import IndigoInchi
from bingo import Bingo, BingoException, BingoObject
success = True
if not success:
raise RuntimeError('Indigo not found at %s' % distPaths)
class IndigoObjectCoverageWrapper(IndigoObject):
def __init__(self, dispatcher, id, parent=None):
IndigoObject.__init__(self, dispatcher, id, parent)
self._type = None
self._type = int(self.dbgInternalType()[1:3])
def __getattribute__(self, item):
dispatcher = object.__getattribute__(self, 'dispatcher')
type = object.__getattribute__(self, '_type')
if dispatcher is not None:
if item in dispatcher._indigoObjectCoverageDict:
dispatcher._indigoObjectCoverageDict[item] += 1
if type:
if type not in dispatcher._indigoObjectCoverageByTypeDict:
dispatcher._indigoObjectCoverageByTypeDict[type] = {}
dispatcher._indigoObjectCoverageByTypeDict[type][item] = 1
else:
if item not in dispatcher._indigoObjectCoverageByTypeDict[type]:
dispatcher._indigoObjectCoverageByTypeDict[type][item] = 1
else:
dispatcher._indigoObjectCoverageByTypeDict[type][item] += 1
return object.__getattribute__(self, item)
class IndigoCoverageWrapper(Indigo):
def __init__(self, path=None):
Indigo.__init__(self, path)
if isJython() or isIronPython():
IndigoObject = IndigoObjectCoverageWrapper
# TODO: Change standard IndigoObject to IndigoObjectCoverageWrapper
else:
self.IndigoObject = IndigoObjectCoverageWrapper
self._indigoObjectCoverageDict = dict()
self._indigoObjectCoverageByTypeDict = dict()
m = self.createMolecule()
for item in getmembers(m):
if type(item[1]) in (BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType) and not item[0].startswith('_'):
self._indigoObjectCoverageDict[item[0]] = 0
self._indigoCoverageDict = dict()
for item in getmembers(self):
if type(item[1]) in (BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType) and not item[0].startswith('_'):
self._indigoCoverageDict[item[0]] = 0
def __getattribute__(self, item):
try:
indigoCoverageDict = object.__getattribute__(self, '_indigoCoverageDict')
if indigoCoverageDict:
if item in indigoCoverageDict:
indigoCoverageDict[item] += 1
except AttributeError:
pass
return object.__getattribute__(self, item)
def version(self):
return super(IndigoCoverageWrapper, self).version() + '-coverage'
class IndigoObjectTypeEnum:
SCANNER = 1
MOLECULE = 2
QUERY_MOLECULE = 3
REACTION = 4
QUERY_REACTION = 5
OUTPUT = 6
REACTION_ITER = 7
REACTION_MOLECULE = 8
GROSS = 9
SDF_LOADER = 10
SDF_SAVER = 11
RDF_MOLECULE = 12
RDF_REACTION = 13
RDF_LOADER = 14
SMILES_MOLECULE = 15
SMILES_REACTION = 16
MULTILINE_SMILES_LOADER = 17
ATOM = 18
ATOMS_ITER = 19
RGROUP = 20
RGROUPS_ITER = 21
RGROUP_FRAGMENT = 22
RGROUP_FRAGMENTS_ITER = 23
ARRAY = 24
ARRAY_ITER = 25
ARRAY_ELEMENT = 26
MOLECULE_SUBSTRUCTURE_MATCH_ITER = 27
MOLECULE_SUBSTRUCTURE_MATCHER = 28
REACTION_SUBSTRUCTURE_MATCHER = 29
SCAFFOLD = 30
DECONVOLUTION = 31
DECONVOLUTION_ELEM = 32
DECONVOLUTION_ITER = 33
PROPERTIES_ITER = 34
PROPERTY = 35
FINGERPRINT = 36
BOND = 37
BONDS_ITER = 38
ATOM_NEIGHBOR = 39
ATOM_NEIGHBORS_ITER = 40
SUPERATOM = 41
SUPERATOMS_ITER = 42
DATA_SGROUP = 43
DATA_SGROUPS_ITER = 44
REPEATING_UNIT = 45
REPEATING_UNITS_ITER = 46
MULTIPLE_GROUP = 47
MULTIPLE_GROUPS_ITER = 48
GENERIC_SGROUP = 49
GENERIC_SGROUPS_ITER = 50
SGROUP_ATOMS_ITER = 51
SGROUP_BONDS_ITER = 52
DECOMPOSITION = 53
COMPONENT = 54
COMPONENTS_ITER = 55
COMPONENT_ATOMS_ITER = 56
COMPONENT_BONDS_ITER = 57
SUBMOLECULE = 58
SUBMOLECULE_ATOMS_ITER = 59
SUBMOLECULE_BONDS_ITER = 60
MAPPING = 61
REACTION_MAPPING = 62
SSSR_ITER = 63
SUBTREES_ITER = 64
RINGS_ITER = 65
EDGE_SUBMOLECULE_ITER = 66
CML_MOLECULE = 67
CML_REACTION = 68
MULTIPLE_CML_LOADER = 69
SAVER = 70
ATTACHMENT_POINTS_ITER = 71
DECOMPOSITION_MATCH = 72
DECOMPOSITION_MATCH_ITER = 73
TAUTOMER_ITER = 74
TAUTOMER_MOLECULE = 75
IndigoObjectTypeDict = {
1: 'SCANNER',
2: 'MOLECULE',
3: 'QUERY_MOLECULE',
4: 'REACTION',
5: 'QUERY_REACTION',
6: 'OUTPUT',
7: 'REACTION_ITER',
8: 'REACTION_MOLECULE',
9: 'GROSS',
10: 'SDF_LOADER',
11: 'SDF_SAVER',
12: 'RDF_MOLECULE',
13: 'RDF_REACTION',
14: 'RDF_LOADER',
15: 'SMILES_MOLECULE',
16: 'SMILES_REACTION',
17: 'MULTILINE_SMILES_LOADER',
18: 'ATOM',
19: 'ATOMS_ITER',
20: 'RGROUP',
21: 'RGROUPS_ITER',
22: 'RGROUP_FRAGMENT',
23: 'RGROUP_FRAGMENTS_ITER',
24: 'ARRAY',
25: 'ARRAY_ITER',
26: 'ARRAY_ELEMENT',
27: 'MOLECULE_SUBSTRUCTURE_MATCH_ITER',
28: 'MOLECULE_SUBSTRUCTURE_MATCHER',
29: 'REACTION_SUBSTRUCTURE_MATCHER',
30: 'SCAFFOLD',
31: 'DECONVOLUTION',
32: 'DECONVOLUTION_ELEM',
33: 'DECONVOLUTION_ITER',
34: 'PROPERTIES_ITER',
35: 'PROPERTY',
36: 'FINGERPRINT',
37: 'BOND',
38: 'BONDS_ITER',
39: 'ATOM_NEIGHBOR',
40: 'ATOM_NEIGHBORS_ITER',
41: 'SUPERATOM',
42: 'SUPERATOMS_ITER',
43: 'DATA_SGROUP',
44: 'DATA_SGROUPS_ITER',
45: 'REPEATING_UNIT',
46: 'REPEATING_UNITS_ITER',
47: 'MULTIPLE_GROUP',
48: 'MULTIPLE_GROUPS_ITER',
49: 'GENERIC_SGROUP',
50: 'GENERIC_SGROUPS_ITER',
51: 'SGROUP_ATOMS_ITER',
52: 'SGROUP_BONDS_ITER',
53: 'DECOMPOSITION',
54: 'COMPONENT',
55: 'COMPONENTS_ITER',
56: 'COMPONENT_ATOMS_ITER',
57: 'COMPONENT_BONDS_ITER',
58: 'SUBMOLECULE',
59: 'SUBMOLECULE_ATOMS_ITER',
60: 'SUBMOLECULE_BONDS_ITER',
61: 'MAPPING',
62: 'REACTION_MAPPING',
63: 'SSSR_ITER',
64: 'SUBTREES_ITER',
65: 'RINGS_ITER',
66: 'EDGE_SUBMOLECULE_ITER',
67: 'CML_MOLECULE',
68: 'CML_REACTION',
69: 'MULTIPLE_CML_LOADER',
70: 'SAVER',
71: 'ATTACHMENT_POINTS_ITER',
72: 'DECOMPOSITION_MATCH',
73: 'DECOMPOSITION_MATCH_ITER',
74: 'TAUTOMER_ITER',
75: 'TAUTOMER_MOLECULE',
}
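if __name__ == '__main__':
    # Smoke-test sketch for the wrappers above: exercise a couple of standard
    # Indigo API calls (loadMolecule, aromatize) and report which wrapped
    # methods were actually hit.
    indigo = IndigoCoverageWrapper()
    mol = indigo.loadMolecule('c1ccccc1')
    mol.aromatize()
    print(indigo.version())
    print({k: v for k, v in indigo._indigoObjectCoverageDict.items() if v > 0})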
import os
from .base import *
BASE_SITE_URL = 'https://rapidpivot.com'
AMQP_URL = 'amqp://guest:guest@localhost:5672//'
ALLOWED_HOSTS = ['rapidpivot.com']
ADMINS = (('Name', 'email@service.com'),)
DEBUG = False
TEMPLATE_DEBUG = False
# SSL/TLS Settings
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
os.environ['wsgi.url_scheme'] = 'https'
# Email Settings
EMAIL_USE_TLS = True
EMAIL_HOST = retrieve_secret_configuration("EMAIL_HOST")
EMAIL_HOST_USER = retrieve_secret_configuration("EMAIL_USER")
EMAIL_HOST_PASSWORD = retrieve_secret_configuration("EMAIL_PASS")
EMAIL_PORT = retrieve_secret_configuration("EMAIL_PORT")
# TEMPLATE_DIRS += ("",)
# INSTALLED_APPS += ("",)
# Basic Logging Configuration
# https://docs.djangoproject.com/en/1.7/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': 'RAPID.log',
},
},
'loggers': {
'django.request': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
}
from setuptools import find_packages, setup
import os
# load README.md as long_description
long_description = ''
if os.path.exists('README.md'):
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='XMCD Projection',
version='1.0.0',
packages=find_packages(include=['xmcd_projection']),
description='Library for simulating XMCD projection signal',
long_description=long_description,
long_description_content_type='text/markdown',
author='Luka Skoric',
license='MIT LICENSE',
install_requires=[
'trimesh>=3.9.12',
'numpy==1.20.2',
'matplotlib>=3.4.1',
'numba>=0.53.1',
'joblib>=1.0.1',
'PyQt5>=5.15.4',
'pyqtgraph>=0.11.1',
'scikit-image>=0.18.1',
'scipy>=1.6.2',
'PyOpenGL>=3.1.5',
'cached-property>=1.5.2',
'pandas>=1.0.5',
'meshio>=4.0.16',
'tqdm<=4.46.1'
]
)
from random import sample
from time import sleep
lista = []
print('\033[0;34m-'*30)
print(' \033[0;34mMEGA SENA GAMES')
print('\033[0;34m-\033[m'*30)
j = int(input('How many games do you want to generate? '))
print('DRAWING...')
for i in range(0, j):
    # draw 6 distinct numbers from 1 to 60 (range's upper bound is exclusive)
    ran = sorted(sample(range(1, 61), 6))
    lista.append(ran[:])
    sleep(2)
    print(f'Game {i+1}: {lista[i]}')
from PyQt5.QtWidgets import QWidget, QMainWindow
from PyQt5.QtCore import Qt
import gi.repository
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
from utils import Rect
# from keyboard import Keybroad
# from button import Button
# moved inside classes to prevent cyclic import
# Window(parent, title, width=1280, height=720)
#
# Simple class to create PyQt5 windows.
# Default window size is 1280x720 and position the center of the screen.
# If another window is passed as the first argument, when showing the child
# window the parent one will temporarily freeze.
#
# Use:
#
# class App(Window):
# def __init__(self, parent, title):
# super().__init__(parent, title, modal)
#
# primary = App(None, 'This is my primary window')
# secondary = App(primary, 'This is my secondary window')
#
# primary.show()
class Window(QMainWindow):
def __init__(self, parent, title, modal=True, width=1280, height=720):
if parent == None:
super().__init__()
else:
super().__init__(parent)
self.parent = parent
self.title = title
self.modal = modal
self.width = width
self.height = height
screen = Gdk.Screen.get_default()
        window_x = (screen.get_width() - width) // 2
        window_y = (screen.get_height() - height) // 2
self.setWindowTitle(self.title)
if modal:
self.setWindowModality(Qt.ApplicationModal)
self.setGeometry(window_x, window_y, width, height)
class KeycapConfigWindow(Window):
def __init__(self, parent, width=800, height=600):
        super().__init__(
            parent,
            'window title',
            True,
            width,
            height)
from button import Button
self.ui = []
Button(self, self.ui, 'save', Rect(742, 560, 10, 10),
lambda: self.hide(), '')
self.show()
# Generated by Django 3.1.5 on 2021-02-01 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rentalsapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='tenants',
name='amount',
field=models.CharField(default='none', max_length=30),
),
]
# -*- coding: utf-8 -*-
def main():
from itertools import accumulate
n = int(input())
    # sort the monsters in ascending order of size
a = sorted(list(map(int, input().split())))
sum_a = list(accumulate([0] + a))
    # there are at most N colors (one per monster)
ans = [False for _ in range(n)]
    # initialization: the largest monster is guaranteed to survive to the end
ans[n - 1] = True
    # Key insight:
    # monster i survives to the end
    # = restated: while repeatedly absorbing smaller monsters, it never faces
    #   a monster more than twice its accumulated size
    # judge the monsters in decreasing order of size
for i in range(n - 2, -1, -1):
if a[i + 1] <= 2 * a[i]:
if ans[i + 1]:
ans[i] = True
        # a smaller monster j first absorbs every monster smaller than itself,
        # then its total size is compared against monster j+1
elif a[i + 1] <= 2 * sum_a[i + 1]:
if ans[i + 1]:
ans[i] = True
else:
ans[i] = False
print(sum(ans))
if __name__ == '__main__':
main()
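# Worked example: a = [2, 4, 9] (sorted), sum_a = [0, 2, 6, 15].
# i = 1: 9 > 2*4, but 9 <= 2*sum_a[2] = 12 and ans[2] is True, so monster 4
#        survives (it first absorbs 2, growing to 6, and 9 <= 2*6).
# i = 0: 4 <= 2*2 and ans[1] is True, so monster 2 survives as well.
# Output: 3.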
# Python code for 2D random walk.
import json
import sys
import random
import time
import math
import logging
import asyncio
from .DataAggregator import DataAggregator
from .PositioningTag import PositioningTag
from pywalkgen.walk_model import WalkAngleGenerator
from pywalkgen.pub_sub import PubSubAMQP
from pywalkgen.imu import IMU
from pywalkgen.raycast import Particle, StaticMap
from pywalkgen.collision_detection import CollisionDetection
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('/tmp/walkgen.log')
handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)-8s-[%(filename)s:%(lineno)d]-%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# ========================================= WALK PATTERN GENERATOR ===================================================
class WalkPatternGenerator:
def __init__(self, eventloop, config_file):
"""
Initialize walk pattern generator
Walk pattern generator consists of
:param eventloop: event loop for amqp pub sub
:param config_file: config file
"""
try:
# id assigned to the personnel.
self.walker_id = config_file["id"]
# initialize the start coordinates of the personnel
self.pos = {'x': config_file["start_coordinates"]["x"],
'y': config_file["start_coordinates"]["y"],
'z': config_file["start_coordinates"]["z"]}
walk_attribute = config_file["attribute"]["walk"]
# Walk angle generator for the personnel walk
self.walk_angle_gen = WalkAngleGenerator(mid_point=walk_attribute["sigmoid_attributes"]["mid_point"],
steepness=walk_attribute["sigmoid_attributes"]["steepness"],
max_value=math.radians(
walk_attribute["sigmoid_attributes"]["min_angle"]),
level_shift=math.radians(
walk_attribute["sigmoid_attributes"]["max_angle"]),
walk_direction_factor=walk_attribute["direction_factor"],
walk_angle_deviation_factor=walk_attribute[
"angle_deviation_factor"])
# IMU tag
self.imu_tag = IMU(config_file=config_file)
# Collision detection for static and dynamic obstacles
self.collision = CollisionDetection(scene=StaticMap(config_file=config_file["map"]),
particle=Particle(particle_id=config_file["id"],
x=config_file["start_coordinates"]["x"],
y=config_file["start_coordinates"]["y"]),
env_collision_distance=config_file["attribute"]["collision"][
"distance"]["environment"],
robot_collision_distance=config_file["attribute"]["collision"][
"distance"]["robot"])
# UWB tag
self.uwb_tag = PositioningTag(config=config_file["attribute"]["positioning"]["outliers"])
self.data_aggregators = []
for area in config_file["map"]["area_division"]:
self.data_aggregators.append(DataAggregator(area_config=area))
# set Walk attributes and angle generators
self.max_walk_speed = walk_attribute["max_walk_speed"]
self.walk_dimension = walk_attribute["walk_dimension"]
self.walk_angle = 0
# position related states
self.pos_prev = {'x': self.pos['x'], 'y': self.pos['y'], 'z': self.pos['z']}
self.net_step_size = 0
# time stamp information
self.time_now = 0
self.time_past = 0
# sample time information
self.interval = config_file['attribute']['other']['interval']
self.distance_factor = config_file["attribute"]["walk"]["distance_factor"]
self.distance_in_sample_time = 0
# Publisher
protocol = config_file["protocol"]
self.publishers = []
if protocol["publishers"] is not None:
for publisher in protocol["publishers"]:
if publisher["type"] == "amq":
logger.debug('Setting Up AMQP Publisher for Robot')
self.publishers.append(
PubSubAMQP(
eventloop=eventloop,
config_file=publisher,
binding_suffix=self.walker_id
)
)
else:
logger.error("Provide protocol amq config")
raise AssertionError("Provide protocol amq config")
# Subscriber
self.subscribers = []
if protocol["subscribers"] is not None:
for subscriber in protocol["subscribers"]:
if subscriber["type"] == "amq":
                        logger.debug('Setting Up AMQP Subscriber for Robot')
if subscriber["exchange"] == "control_exchange":
self.subscribers.append(
PubSubAMQP(
eventloop=eventloop,
config_file=subscriber,
binding_suffix="",
app_callback=self._consume_telemetry_msg
)
)
else:
self.subscribers.append(
PubSubAMQP(
eventloop=eventloop,
config_file=subscriber,
binding_suffix=self.walker_id,
app_callback=self._consume_telemetry_msg
)
)
else:
logger.error("Provide protocol amq config")
raise AssertionError("Provide protocol amq config")
except Exception as e:
            logger.critical("unhandled exception: %s", e)
sys.exit(-1)
def _consume_telemetry_msg(self, **kwargs):
"""
consume telemetry messages
:param kwargs: must contain following information
1. exchange_name
2. binding_name
3. message_body
:return: none
"""
# extract message attributes from message
exchange_name = kwargs["exchange_name"]
binding_name = kwargs["binding_name"]
message_body = json.loads(kwargs["message_body"])
# check for matching subscriber with exchange and binding name in all subscribers
for subscriber in self.subscribers:
if subscriber.exchange_name == exchange_name:
if "visual.generator.robot" in binding_name:
# extract robot id from binding name
binding_delimited_array = binding_name.split(".")
robot_id = binding_delimited_array[len(binding_delimited_array) - 1]
msg_attributes = message_body.keys()
# check for must fields in the message attributes
if ("id" in msg_attributes) and ("base" in msg_attributes) \
and ("shoulder" in msg_attributes) and ("elbow" in msg_attributes):
# check if robot id matches with 'id' field in the message
if robot_id == message_body["id"]:
logger.debug(f'Sub: exchange: {exchange_name} msg {message_body}')
# extract information from message body
base_shoulder = [message_body["base"], message_body["shoulder"]]
shoulder_elbow = [message_body["shoulder"], message_body["elbow"]]
elbow_wrist = [message_body["elbow"], message_body["wrist"]]
prefix = "robot_" + message_body["id"]
# update robot in scene for collision detection
self.collision.update_scene(obstacle_id=prefix + "_base_shoulder",
points=base_shoulder,
shape="line")
self.collision.update_scene(obstacle_id=prefix + "_shoulder_elbow",
points=shoulder_elbow,
shape="line")
self.collision.update_scene(obstacle_id=prefix + "_elbow_wrist",
points=elbow_wrist,
shape="line")
return
async def _update3d(self, tdelta=-1):
"""
update walker position in 3D
:param tdelta: time duration between successive updates
:return:
"""
try:
# calculate loop time
if tdelta > 0:
                # valid time delta received as input parameter
timedelta = tdelta
elif self.time_now == 0 and self.time_past == 0:
# time delta calculation for first update cycle
self.time_now = time.time()
self.time_past = self.time_now
timedelta = 0.01
else:
# time delta calculation based on run time
self.time_now = time.time()
timedelta = self.time_now - self.time_past
self.time_past = self.time_now
assert (timedelta >= 0), f"Time delta: {timedelta}, can't be negative"
# Calculate Walk angle for next step, and also check if walker is in collision course
ranging, collision_avoidance_msg = self.collision.ranging()
self.walk_angle, collision_decision = \
self.walk_angle_gen.get_walk_angle(angle=self.walk_angle,
ranging=ranging,
velocity=self.net_step_size / timedelta)
step_length = {'x': 0, 'y': 0, 'z': 0}
if collision_decision:
# self.net_step_size = self.net_step_size * 0.2
self.net_step_size = random.uniform(self.net_step_size, self.distance_in_sample_time * 0.6134)
else:
# step size decision
new_distance_in_sample_time = random.uniform(self.distance_in_sample_time,
self.max_walk_speed * timedelta * 0.6134)
self.distance_in_sample_time = (self.distance_in_sample_time * (1 - self.distance_factor)) \
+ (new_distance_in_sample_time * self.distance_factor)
self.net_step_size = random.uniform(self.net_step_size, self.distance_in_sample_time * 0.6134)
# step length in each of the axis
if self.walk_dimension == 1:
step_length['x'] = self.net_step_size * math.cos(self.walk_angle)
step_length['y'] = 0
step_length['z'] = 0
elif self.walk_dimension == 2:
step_length['x'] = self.net_step_size * math.cos(math.radians(self.walk_angle))
step_length['y'] = self.net_step_size * math.sin(math.radians(self.walk_angle))
step_length['z'] = 0
            else:
                step_length['x'] = self.net_step_size * math.cos(self.walk_angle)
                step_length['y'] = self.net_step_size * math.sin(self.walk_angle)
                step_length['z'] = math.sin(math.sqrt(math.pow(step_length['x'], 2) + math.pow(
                    step_length['y'], 2)))  # todo write logic for z_step_length based on angle
# walk based on step size calculated in each direction
self.pos['x'] = self.pos_prev['x'] + step_length['x']
self.pos['y'] = self.pos_prev['y'] + step_length['y']
self.pos['z'] = self.pos_prev['z'] + step_length['z']
# update particle's position
self.collision.update_particles(x=self.pos['x'], y=self.pos['y'])
heading = {'ref_heading': {'end': (self.pos['x'], self.pos['y']),
'start': (self.pos_prev['x'], self.pos_prev['y'])}}
# prepare for next iteration
self.pos_prev['x'] = self.pos['x']
self.pos_prev['y'] = self.pos['y']
self.pos_prev['z'] = self.pos['z']
uwb_measurement = self.uwb_tag.get_measurement(ref=[self.pos['x'], self.pos['y'], self.pos['z']])
data_aggregator_id = self.get_area_information(ref=[self.pos['x'], self.pos['y']])
result = {
"measurement": "walk",
"time": time.time_ns(),
"id": self.walker_id,
"data_aggregator_id": data_aggregator_id,
"walk_angle": self.walk_angle,
"x_step_length": step_length['x'],
"y_step_length": step_length['y'],
"z_step_length": step_length['z'],
"x_ref_pos": self.pos['x'],
"y_ref_pos": self.pos['y'],
"z_ref_pos": self.pos['z'],
"x_uwb_pos": uwb_measurement[0],
"y_uwb_pos": uwb_measurement[1],
"z_uwb_pos": uwb_measurement[2],
"view": ranging
}
result.update(heading)
imu_result = self.imu_tag.update(cur_position=result, tdelta=timedelta)
result.update(imu_result)
result.update({"timestamp": round(time.time() * 1000)})
plm_result = {
"id": result["id"],
"data_aggregator_id": result["data_aggregator_id"],
"x_uwb_pos": result["x_uwb_pos"],
"y_uwb_pos": result["y_uwb_pos"],
"z_uwb_pos": result["z_uwb_pos"],
'x_imu_vel': result['x_imu_vel'],
'y_imu_vel': result['y_imu_vel'],
'z_imu_vel': result['z_imu_vel'],
"timestamp": result['timestamp']
}
return result, plm_result
except Exception as e:
            logger.critical(f"unhandled exception: {e}")
sys.exit(-1)
async def publish(self, exchange_name, msg, external_binding_suffix=None):
        """
        Publish an AMQP message.
        :param exchange_name: name of the AMQP exchange
        :param msg: message to be published
        :param external_binding_suffix: binding suffix, appended to the end of the binding name
        :return:
        """
for publisher in self.publishers:
if exchange_name == publisher.exchange_name:
await publisher.publish(message_content=msg, external_binding_suffix=external_binding_suffix)
logger.debug(f'Pub: exchange: {exchange_name} msg {msg}')
async def connect(self):
"""
connects amqp publishers and subscribers
:return:
"""
for publisher in self.publishers:
await publisher.connect()
for subscriber in self.subscribers:
await subscriber.connect(mode="subscriber")
async def update(self):
"""
update walk generator.
Note This function need to be called in a loop every update cycle
:param binding_key: binding key name (optional) used when other than default binding key
:return:
"""
result = dict()
if self.interval >= 0:
all_result, plm_result = await self._update3d()
result.update(all_result)
await self.publish(exchange_name='generator_personnel', msg=json.dumps(result).encode())
        # sleep until it's time for the next sample
if self.interval >= 0:
await asyncio.sleep(delay=self.interval)
else:
await asyncio.sleep(delay=0)
def get_states(self):
        return {"x_ref_pos": self.pos['x'], "y_ref_pos": self.pos['y'], "z_ref_pos": self.pos['z']}
def get_area_information(self, ref):
for data_aggregator in self.data_aggregators:
if data_aggregator.locate(point=[ref[0], ref[1]]):
return data_aggregator.id
return None
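# A minimal driver sketch for the walker above (hedged: the enclosing class
# name and its constructor arguments are not shown in this excerpt, so
# `Walker(...)` is a hypothetical stand-in):
#
#     async def main():
#         walker = Walker(...)       # project-specific configuration
#         await walker.connect()     # connect AMQP publishers and subscribers
#         while True:
#             await walker.update()  # one walk step + publish, then sleep
#
#     asyncio.run(main())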
|
nilq/baby-python
|
python
|
from tkinter import *
from tkinter import messagebox
from PIL import ImageTk
import sqlite3
root=Tk()
root.geometry("1196x600")
root.title("Hotel Management System")
#bg=PhotoImage(file ="D:\Python\HotelManagement\Background.png")
#bglabel=Label(root,image=bg)
#bglabel.place(x=0,y=0)
backimage=PhotoImage(file=r"D:\Python\HotelManagement\Back.png")
#====database
conn=sqlite3.connect('Hotel_Management.db')
c=conn.cursor()
# c.execute("""CREATE TABLE room(
# Name varchar,
# Phone_number varchar,
# address varchar,
# adhar varchar,
# occupants varchar,
# category varchar)""")
conn.commit()
conn.close()
class BookingPage:
global root
global backimage
def __init__(self,root):
self.root=root
root.geometry("1196x600")
root.title("Room Booking")
#self.bag=PhotoImage(file ="D:\Python\HotelManagement\Background.png")
#self.bglabel=Label(root,image=self.bag)
#self.bglabel.place(x=0,y=0)
self.pane=Canvas(root,bg="White",height=1000,width=800)
self.pane.place(relx=0.5,y=500,anchor=CENTER)
self.label=Label(root,text="Availability",bg="White",font=("Product Sans",20)).place(relx=0.5,rely=0.05,anchor=CENTER)
#====================================================================================================================================================================================================
# Getting the number of occupants
self.Occupants=StringVar()
OccupantLabel=Label(root,text="Select Number of Occupants",bg="White",font=("Product Sans",12)).place(relx=0.3,rely=0.55)
        self.OccupantSelect=OptionMenu(root,self.Occupants,*["1","2","3"])
self.OccupantSelect.config(indicatoron=0)
self.OccupantSelect.configure(bg="White",highlightthickness=0,highlightbackground="White",borderwidth=0)
self.OccupantSelect.place(relx=0.7,rely=0.55,anchor=CENTER)
self.Occupants.set("1")
#====================================================================================================================================================================================================
# choosing the category of the room
self.Category=StringVar()
self.CategoryLabel=Label(root,text="Select Category",bg="White",font=("Product Sans",12)).place(relx=0.3,rely=0.65)
self.CategorySelect=OptionMenu(root,self.Category,*["A/C","Non A/C","Presidential Suite"])
self.CategorySelect.config(indicatoron=0)
self.CategorySelect.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0)
self.CategorySelect.place(relx=0.6,rely=0.65)
self.Category.set("A/C")
#====================================================================================================================================================================================================
# Info label
self.InfoLabel=Label(root,bg="White",font=("Product Sans",12),text="")
self.InfoLabel.place(relx=0.5,rely=0.5,anchor=CENTER)
# Price Lablel
self.PriceLabel=Label(root,bg="White",font=("Product Sans",12))
self.PriceLabel.place(relx=0.5,rely=0.6,anchor=CENTER)
#====================================================================================================================================================================================================
# Buttons
self.IDProof=StringVar()
self.IDProof.set("Aadhar")
self.label=Label(root,text="Enter Customer Details",bg="White",font=("Product Sans",20)).place(relx=0.5,rely=0.05,anchor=CENTER)
self.name=Label(root,text="Name",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.1)
self.Number=Label(root,text="Phone Number",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.2)
self.Address=Label(root,text="Address",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.3)
self.ID=OptionMenu(root,self.IDProof,*["Aadhar","Driving Licence","Other"])
self.ID.config(indicatoron=0,font=("Product Sans",12))
self.ID.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0)
self.ID.place(relx=0.6,rely=0.4)
self.IDLabel=Label(root,text="ID Proof",bg="White",font=("Product Sans",12)).place(relx=0.3,rely=0.4)
        self.EnterName=Entry(root,font=("Product Sans",12))
        self.EnterName.place(relx=0.6,rely=0.1)
        self.EnterNumber=Entry(root,font=("Product Sans",12))
        self.EnterNumber.place(relx=0.6,rely=0.2)
        self.EnterAddress=Entry(root,font=("Product Sans",12))
        self.EnterAddress.place(relx=0.6,rely=0.3)
        self.EnterIdProof=Entry(root,font=("Product Sans",12))
        self.EnterIdProof.place(relx=0.6,rely=0.45)
self.bookbutton=Button(root,text="Confirm",command=self.confirm_function)
self.bookbutton.place(relx=0.5,rely=0.95,anchor=CENTER)
self.Days=Label(root,text="No of days",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.75)
self.Day=IntVar()
self.DaysSelect=OptionMenu(root,self.Day,*[1,2,3,4])
self.DaysSelect.config(indicatoron=0)
self.DaysSelect.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0)
self.DaysSelect.place(relx=0.6,rely=0.75)
self.Day.set(1)
self.subtotal=Label(root,bg="white",font=("Product Sans",12))
self.subtotal.place(relx=0.5,rely=0.85,anchor=CENTER)
# def Book(self): #Book Button Command
# self.RoomCategory=self.Category.get()
# self.days=self.Day.get()
# if self.RoomCategory=="Non A/C":
# price=1000
# elif self.RoomCategory=="A/C":
# price=1500
# elif self.RoomCategory=="Presidential Suite":
# price=2000
# self.totalPrice=price*self.days
# self.totalPrice=str(self.totalPrice)
# self.TXT=("Your subtotal will be "+self.totalPrice )
# self.subtotal.config(text=self.TXT)
def ShowInfo(self):
self.InfoLabel.config(text="Info will be shown")
def NumOfOcc(self):
NumberOfOccupants=self.Occupants.get()
return NumberOfOccupants
def RoomCategoryFun(self,Category):
RoomCategory=self.Category.get()
if RoomCategory=="Non A/C":
self.PriceLabel.config(text="Price: 1000")
elif RoomCategory=="A/C":
self.PriceLabel.config(text="Price: 1500")
elif RoomCategory=="Presidential Suite":
self.PriceLabel.config(text="Price: 2000")
def Back(self):
for widget in root.winfo_children():
widget.destroy()
SplashScreen(root)
def FinalPage(self):
for widget in root.winfo_children():
widget.destroy()
UserInfo(root)
# def BillingPage(self):
# self.newWindow = Toplevel(self.root)
# self.app = BillingPage(self.newWindow)
def confirm_function(self):
conn=sqlite3.connect('Hotel_Management.db')
c=conn.cursor()
c.execute("INSERT INTO room VALUES(:Name,:Phone_number,:address,:adhar,:occupants,:category)",
{
'Name':self.EnterName.get(),
'Phone_number':self.EnterNumber.get(),
'address':self.EnterAddress.get(),
'adhar':self.EnterIdProof.get(),
'occupants':self.Occupants.get(),
'category':self.Category.get()
})
conn.commit()
conn.close()
def delete(self):
self.EnterName.delete(0,END)
self.EnterAddress.delete(0,END)
self.EnterIdProof.delete(0,END)
        self.EnterNumber.delete(0,END)
class BillingPage:
global root
global backimage
def __init__(self,root):
self.root=root
#self.bg=PhotoImage(file ="D:\Python\HotelManagement\Background.png")
#self.bglabel=Label(root,image=bg)
#self.bglabel.place(x=0,y=0)
#===========================================================================================================================================================================================================================
self.label5=Label(root,text='BILL PAYMENT',borderwidth=1,relief='solid',width=12,height=3)
self.label5.pack()
self.label5.place(x=460,y=30)
self.label6 = Label(root, borderwidth=5, relief='solid', width=50, height=20)
self.label6.pack()
self.label6.place(x=500, y=120)
self.pay=StringVar()
self.payno=IntVar()
self.r1=Radiobutton(root,text='PAY WITH CREDIT CARD',variable=self.payno,value=1)
self.r1.pack()
self.r1.place(x=20,y=100)
self.r2 = Radiobutton(root, text='CASH', variable=self.payno, value=2)
self.r2.pack()
self.r2.place(x=20,y=170)
self.r3 = Radiobutton(root, text='ONLINE PAYMENT', variable=self.payno, value=3)
self.r3.pack()
self.r3.place(x=20,y=240)
    def fun_pay(self):
        messagebox.showinfo('Hello','THANKS FOR CHOOSING\nOUR HOTEL\n\n\nPAYMENT DONE SUCCESSFULLY')
self.b = Label(root, text="PAY NOW", foreground="blue", bg='pink', activebackground="red", width=10, height=2)
self.b.pack()
self.b.place(x=50,y=420)
self.backbutton=Button(root,text="Back",image=backimage,command=self.Back,compound=LEFT)
self.backbutton.place(relx=0.1,rely=0.1,anchor=CENTER)
def Back(self):
for widget in root.winfo_children():
widget.destroy()
SplashScreen(root)
class Login:
def WelcomePage(self):
for widget in root.winfo_children():
widget.destroy()
SplashScreen(root)
def __init__(self,root):
self.root=root
self.root.title("Admin login")
self.root.geometry("1169x600")
#====login frame====
root=Frame(self.root,bg="white")
root.place(x=100,y=150,height=400,width=500)
title=Label(root,text="Admin Login",font=("Impact",35,"bold"),fg="gray",bg="white").place(x=90,y=40)
desc=Label(root,text="Fill username and password here",font=("Goudy old style",15,"bold"),fg="gray",bg="white").place(x=90,y=100)
#====Username module====
lbl_username=Label(root,text="Username",font=("Impact",15),fg="gray",bg="white").place(x=90,y=140)
self.txt_user=Entry(root,font=("times new roman",15),bg="lightgray")
self.txt_user.place(x=90,y=170, width=350, height=35)
#====Password module====
lbl_password=Label(root,text="Password",font=("Impact",15),fg="gray",bg="white").place(x=90,y=210)
self.txt_pass=Entry(root,show="*",font=("times new roman",15),bg="lightgray")
self.txt_pass.place(x=90,y=240, width=350, height=35)
#====Button====
forget_btn=Button(root,text="Forgot password?",bg="white",fg="gray",bd=0,font=("times new roman",12)).place(x=90,y=280)
login_btn=Button(root,command=self.login_function,text="login",bg="white",fg="gray",font=("times new roman",15)).place(x=90,y=320)
def login_function(self):
if self.txt_user.get()=="" or self.txt_pass.get()=="":
messagebox.showerror("Error","All fields are required", parent=self.root)
elif self.txt_user.get()!="Admin" or self.txt_pass.get()!="1234":
messagebox.showerror("Error","Invalid Username/password", parent=self.root)
else:
messagebox.showinfo("Welcome","Welcome Admin")
self.WelcomePage()
class SplashScreen:
global root
def Booking(self):
for widget in root.winfo_children():
widget.destroy()
BookingPage(root)
def Billing(self):
for widget in root.winfo_children():
widget.destroy()
BillingPage(root)
def Cab(self):
for widget in root.winfo_children():
widget.destroy()
SplashScreen(root)
def LogOut(self):
for widget in root.winfo_children():
widget.destroy()
Login(root)
def __init__(self,root):
self.root=root
#self.root.title("Login Page")
#self.bag=PhotoImage(file ="D:\Python\HotelManagement\Background.png")
#self.bglabel=Label(root,image=self.bag)
#self.bglabel.place(x=0,y=0)
#frames code
#Labels
title=Label(root,text="MAIN MENU",font=("Arial black",45,"bold"),fg="blue",bg="sky blue").place(x=220,y=0)
# label_roomAvail = Label(root, text="ROOM AVAILABILITY",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=30,y=120)
# label_checkOUT = Label(root,text="CHECK OUT",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=550,y=120)
# label_cabBook = Label(root,text="BOOK A CAB",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=30,y=350)
# label_billing = Label(root,text="BILLING",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=550,y=350)
#BUTTONS CODE
roomAvail=Button(root,text="ROOM AVAILABILITY",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.Booking).place(x=30,y=140)
checkOut = Button(root,text="CHECK-OUT",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.Billing).place(x=530,y=140)
#cabBook = Button(root,text="CAB BOOK",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.Cab).place(x=100,y=310)
billing = Button(root,text="Log Out",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.LogOut).place(x=560,y=310)
class UserInfo:
global root
global backimage
def __init__(self,root):
self.root=root
root.geometry("1196x600")
root.title("Room Booking")
#self.bag=PhotoImage(file ="D:\Python\HotelManagement\Background.png")
#self.bglabel=Label(root,image=self.bag)
#self.bglabel.place(x=0,y=0)
self.pane=Canvas(root,bg="White",height=1000,width=800)
self.pane.place(relx=0.5,y=500,anchor=CENTER)
self.IDProof=StringVar()
self.IDProof.set("Aadhar")
self.label=Label(root,text="Enter Customer Details",bg="White",font=("Product Sans",20)).place(relx=0.5,rely=0.05,anchor=CENTER)
self.name=Label(root,text="Name",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.2)
self.Number=Label(root,text="Phone Number",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.3)
self.Address=Label(root,text="Address",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.4)
self.ID=OptionMenu(root,self.IDProof,*["Aadhar","Driving Licence","Other"])
self.ID.config(indicatoron=0,font=("Product Sans",12))
self.ID.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0)
self.ID.place(relx=0.6,rely=0.5)
self.IDLabel=Label(root,text="ID Proof",bg="White",font=("Product Sans",14)).place(relx=0.3,rely=0.5)
        self.EnterName=Entry(root,font=("Product Sans",12))
        self.EnterName.place(relx=0.6,rely=0.2)
        self.EnterNumber=Entry(root,font=("Product Sans",12))
        self.EnterNumber.place(relx=0.6,rely=0.3)
        self.EnterAddress=Entry(root,font=("Product Sans",12))
        self.EnterAddress.place(relx=0.6,rely=0.4)
        self.EnterIdProof=Entry(root,font=("Product Sans",12))
        self.EnterIdProof.place(relx=0.6,rely=0.6)
self.bookbutton=Button(root,text="Confirm",command=self.Book)
self.bookbutton.place(relx=0.5,rely=0.9,anchor=CENTER)
def Book(self): #Book Button Command
pass
# self.name=Label(root,text="Name",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.3,anchor=CENTER)
# class Cab:
# global root
# def __init__(self):
# root=Tk()
# root.geometry("1200x600")
# self.f1=Frame(root,bg="black",borderwidth=6,relief=RIDGE)
# self.f1.pack(side=TOP,fill="y",pady=20)
# self.l1=Label(self.f1,text="WELCOME TO OUR CAB SERVICES",fg="red",padx=13,pady=13,font="comicsansms 25 bold",borderwidth=3)
# self.l1.pack(fill="x")
# self.f2=Frame(root,bg="PINK",borderwidth=6,relief=RIDGE)
# self.f2.pack(side=LEFT,fill=Y,pady=20)
# self.l2=Label(f2,text="CUSTOMER DETAILS ",fg="RED",padx=30,pady=30,font="comicsansms 19 bold",borderwidth=3)
# self.l2.grid(row=0,column=0)
# self.f3=Frame(root,bg="PINK",borderwidth=6,relief=RIDGE)
# self.f3.pack(fill=Y,side=LEFT,padx=30,pady=20)
# self.l3=Label(f3,text="BOOKING DETAILS",fg="RED",padx=30,pady=30,font="comicsansms 19 bold",borderwidth=3)
# self.l3.grid(row=0,column=0)
# self.f4=Frame(root,bg="pink",borderwidth=6,relief=RIDGE)
# self.f4.pack(fill=Y,side=LEFT,pady=20)
# self.l4=Label(f4,text="RECEIPT",fg="RED",padx=30,pady=30,font="comicsansms 19 bold",borderwidth=3)
# self.l4.grid()
# #text for 2nd frame
# self.name=Label(f2,text="NAME",font="comicsansma 15 bold")
# self.gender=Label(f2,text="GENDER",font="comicsansma 15 bold")
# self.address=Label(f2,text="ADDRESS",font="comicsansma 15 bold")
# self.mobile=Label(f2,text="MOBILE",font="comicsansma 15 bold")
# self.email=Label(f2,text="EMAIL",font="comicsansma 15 bold")
# #pack text for 2nd frame
# self.name.grid(row=1,column=0,sticky=W,pady=2,padx=2)
# self.gender.grid(row=2,column=0,sticky=W,pady=6,padx=2)
# self.address.grid(row=3,column=0,sticky=W,pady=6,padx=2)
# self.mobile.grid(row=4,column=0,sticky=W,pady=6,padx=2)
# self.email.grid(row=5,column=0,sticky=W,pady=6,padx=2)
# #variables for 2nd frame
# """namevalue=StringVar()
# gendervalue=StringVar()
# addressvalue=StringVar()
# mobilevalue=StringVar()
# emailvalue=StringVar()"""
# #entries for 2nd frame
# self.nameentry=Entry(f2)
# self.genderentry=Entry(f2)
# self.addressentry=Entry(f2)
# self.mobileentry=Entry(f2)
# self.emailentry=Entry(f2)
# #packing entries for 2nd frame
# self.nameentry.grid(row=1,column=0,pady=2)
# self.genderentry.grid(row=2,column=0,pady=6)
# self.addressentry.grid(row=3,column=0,pady=6)
# self.mobileentry.grid(row=4,column=0,pady=6,padx=4)
# self.emailentry.grid(row=5,column=0,pady=6)
# #buttons for 2nd frame
# self.b1=Button(f2, text="SUBMIT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b1.grid()
# self.b1.place(x=50,y=410,anchor=S)
# self.b2=Button(f2, text="CANCEL", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b2.grid()
# self.b2.place(x=270,y=410,anchor=S)
# #text for 3rd frame
# self.pickup=Label(f3,text="PICKUP",font="comicsansma 12 bold")
# self.drop=Label(f3,text="DROP",font="comicsansma 12 bold")
# self.pooling=Label(f3,text="POOLING",font="comicsansma 12 bold")
# self.luggage=Label(f3,text="LUGGAGE",font="comicsansma 12 bold")
# self.car=Label(f3,text="CAR TYPE",font="comicsansma 12 bold")
# #pack text for 3RD frame
# self.pickup.grid(row=1,column=0,sticky=W,pady=6,padx=2)
# self.drop.grid(row=2,column=0,sticky=W,pady=6,padx=2)
# self.pooling.grid(row=3,column=0,sticky=W,pady=6,padx=2)
# self.luggage.grid(row=4,column=0,sticky=W,pady=6,padx=2)
# self.car.grid(row=5,column=0,sticky=W,pady=6,padx=2)
# #entries for 3RD frame
# self.pickupentry=Entry(f3)
# self.dropentry=Entry(f3)
# self.poolingentry=Entry(f3)
# self.luggageentry=Entry(f3)
# self.carentry=Entry(f3)
# #packing entries for 3RD frame
# self.pickupentry.grid(row=1,column=0,pady=2)
# self.dropentry.grid(row=2,column=0,pady=6)
# self.poolingentry.grid(row=3,column=0,pady=16,padx=16)
# self.luggageentry.grid(row=4,column=0,pady=6,padx=4)
# self.carentry.grid(row=5,column=0,pady=6)
# #buttons for 3rd frame
# self.b1=Button(f3, text="SUBMIT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b1.grid()
# self.b1.place(x=50,y=410,anchor=S)
# self.b2=Button(f3, text="CANCEL", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b2.grid()
# self.b2.place(x=240,y=410,anchor=S)
# #buttons for 4th frame
# self.b1=Button(f4, text="TOTAL", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b1.grid()
# self.b1.place(x=50,y=250,anchor=S)
# self.b2=Button(f4, text="RECIEPT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b2.grid()
# self.b2.place(x=50,y=300,anchor=S)
# self.b3=Button(f4, text="RESET", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b3.grid()
# self.b3.place(x=50,y=350,anchor=S)
# self.b4=Button(f4, text="EXIT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2)
# self.b4.grid()
# self.b4.place(x=50,y=400,anchor=S)
Login(root)
root.mainloop()
|
nilq/baby-python
|
python
|
import os
import pytest
from cctbx import sgtbx, uctbx
from dxtbx.serialize import load
import dials.command_line.cosym as dials_cosym
from dials.algorithms.symmetry.cosym._generate_test_data import (
generate_experiments_reflections,
)
from dials.array_family import flex
from dials.util import Sorry
@pytest.mark.parametrize(
"space_group,engine", [(None, "scitbx"), ("P 1", "scipy"), ("P 4", "scipy")]
)
def test_cosym(dials_data, run_in_tmpdir, space_group, engine):
mcp = dials_data("multi_crystal_proteinase_k")
args = ["space_group=" + str(space_group), "seed=0", f"engine={engine}"]
for i in [1, 2, 3, 4, 5, 7, 8, 10]:
args.append(mcp.join("experiments_%d.json" % i).strpath)
args.append(mcp.join("reflections_%d.pickle" % i).strpath)
dials_cosym.run(args=args)
assert os.path.isfile("symmetrized.refl")
assert os.path.isfile("symmetrized.expt")
experiments = load.experiment_list("symmetrized.expt", check_format=False)
if space_group is None:
assert (
experiments[0].crystal.get_space_group().type().lookup_symbol() == "P 4 2 2"
)
else:
assert (
experiments[0].crystal.get_space_group().type().lookup_symbol()
== space_group
)
joint_reflections = flex.reflection_table.from_file("symmetrized.refl")
    # check that there are 8 unique ids and imageset_ids, and that these
    # correctly correspond to each experiment
assert len(set(joint_reflections["id"])) == 8
assert len(set(joint_reflections["imageset_id"])) == 8
for id_ in range(8):
sel = joint_reflections["id"] == id_
assert set(joint_reflections["imageset_id"].select(sel)) == {id_}
def test_cosym_partial_dataset(dials_data, run_in_tmpdir):
"""Test how cosym handles partial/bad datasets."""
mcp = dials_data("multi_crystal_proteinase_k")
args = []
for i in [1, 2]:
args.append(mcp.join("experiments_%d.json" % i).strpath)
args.append(mcp.join("reflections_%d.pickle" % i).strpath)
# Make one dataset that will be removed in prefiltering
r = flex.reflection_table.from_file(mcp.join("reflections_8.pickle").strpath)
r["partiality"] = flex.double(r.size(), 0.1)
r.as_file("renamed.refl")
args.append("renamed.refl")
args.append(mcp.join("experiments_8.json").strpath)
# Add another good dataset at the end of the input list
args.append(mcp.join("experiments_10.json").strpath)
args.append(mcp.join("reflections_10.pickle").strpath)
dials_cosym.run(args=args)
assert os.path.exists("symmetrized.refl")
assert os.path.exists("symmetrized.expt")
experiments = load.experiment_list("symmetrized.expt", check_format=False)
assert len(experiments) == 3
def test_cosym_partial_dataset_raises_sorry(dials_data, run_in_tmpdir, capsys):
"""Test how cosym handles partial/bad datasets."""
mcp = dials_data("multi_crystal_proteinase_k")
args = ["renamed.refl", mcp.join("experiments_8.json").strpath]
r2 = flex.reflection_table.from_file(mcp.join("reflections_10.pickle").strpath)
r2["partiality"] = flex.double(r2.size(), 0.1)
r2.as_file("renamed2.refl")
args.append("renamed2.refl")
args.append(mcp.join("experiments_10.json").strpath)
with pytest.raises(Sorry):
dials_cosym.run(args=args)
@pytest.mark.parametrize(
(
"space_group",
"unit_cell",
"dimensions",
"sample_size",
"use_known_space_group",
"use_known_lattice_group",
),
[
("P2", None, None, 10, False, False),
("P3", None, None, 20, False, False),
("I23", None, 2, 10, False, False),
("P422", (79, 79, 37, 90, 90, 90), None, 10, True, False),
("P321", (59.39, 59.39, 28.35, 90, 90, 120), None, 5, False, False),
],
)
def test_synthetic(
space_group,
unit_cell,
dimensions,
sample_size,
use_known_space_group,
use_known_lattice_group,
run_in_tmpdir,
):
space_group = sgtbx.space_group_info(space_group).group()
if unit_cell is not None:
unit_cell = uctbx.unit_cell(unit_cell)
experiments, reflections, _ = generate_experiments_reflections(
space_group=space_group,
unit_cell=unit_cell,
unit_cell_volume=10000,
sample_size=sample_size,
map_to_p1=True,
d_min=1.5,
)
experiments.as_json("tmp.expt")
expt_file = "tmp.expt"
joint_table = flex.reflection_table()
for r in reflections:
joint_table.extend(r)
joint_table.as_file("tmp.refl")
refl_file = "tmp.refl"
args = [
expt_file,
refl_file,
"output.experiments=symmetrized.expt",
"output.reflections=symmetrized.refl",
"output.html=cosym.html",
"output.json=cosym.json",
]
if use_known_space_group:
args.append(f"space_group={space_group.info()}")
if use_known_lattice_group:
args.append(f"lattice_group={space_group.info()}")
if dimensions is not None:
args.append(f"dimensions={dimensions}")
dials_cosym.run(args=args)
assert os.path.isfile("symmetrized.refl")
assert os.path.isfile("symmetrized.expt")
assert os.path.isfile("cosym.html")
assert os.path.isfile("cosym.json")
cosym_expts = load.experiment_list("symmetrized.expt", check_format=False)
assert len(cosym_expts) == len(experiments)
for expt in cosym_expts:
if unit_cell is not None:
assert expt.crystal.get_unit_cell().parameters() == pytest.approx(
unit_cell.parameters()
)
if (
str(expt.crystal.get_space_group().info()) == "P 6 2 2"
and str(space_group.info()) == "P 3 2 1"
):
# This is fine
continue
assert str(expt.crystal.get_space_group().info()) == str(space_group.info())
assert expt.crystal.get_space_group() == space_group
|
nilq/baby-python
|
python
|
def print_section_header(header: str) -> None:
    print("========================================================================")
    print(f"=== {header} ")
def print_section_finish() -> None:
    print("=== SUCCESS\n")
|
nilq/baby-python
|
python
|
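# Normalize sink and blockage coordinates so the die's lower-left corner sits
# at (0, 0): read name/x/y/cap records, shift every coordinate by
# (minx, miny), rewrite the files, and record the die size and offset.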
names = []
posx = []
posy = []
caps = []
with open('sink_cap.txt') as f:
for line in f:
tokens = line.split()
names.append(tokens[0])
posx.append(float(tokens[1]))
posy.append(float(tokens[2]))
caps.append(float(tokens[3]))
minx = min(posx)
miny = min(posy)
maxx = max(posx)
maxy = max(posy)
#print(" - minx = " + str(minx))
#print(" - miny = " + str(miny))
#print(" - maxx = " + str(maxx))
#print(" - maxy = " + str(maxy))
with open('sink_cap.txt', 'w') as f:
for i in range(len(posx)):
f.write(names[i] + " " + str(posx[i]-minx) + " " + str(posy[i]-miny) + " " + str(caps[i]) + "\n")
with open('blks_tmp2.txt') as f1, open('blks.txt', 'w') as f2:
for line in f1:
tokens = line.split()
x1 = float(tokens[0]) - minx
y1 = float(tokens[1]) - miny
x2 = float(tokens[2]) - minx
y2 = float(tokens[3]) - miny
f2.write(str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2) + "\n")
with open('die-size.txt', 'w') as f:
f.write(str(maxx-minx) + " " + str(maxy-miny) + " " + str(minx) + " " + str(miny))
#print("../bin/genHtree -w " + str(maxx-minx) + " -h " + str(maxy-miny) + " -n 256 -s 20 -tech 16")
|
nilq/baby-python
|
python
|
from block_model.controller.block_model import BlockModel
from drillhole.controller.composites import Composites
from geometry.controller.ellipsoid import Ellipsoid
from kriging.controller.search_ellipsoid import SearchEllipsoid
from kriging.controller.point_kriging import PointKriging
from variogram.controller.model import Model
from variogram.controller.structure import Structure
from common.discretize import *
from common.rotation import *
blockPath = '../../GP_Data/cy17_spc_assays_pvo_entry_ug.csv'
# blockPath = '../../GP_Data/test_kriging.csv'
ugVarBlock = 'ugcutPVO'
blockColumns = [(ugVarBlock, int)]
var = 'cut'
ugVarComp = 'ugcut'  # column holding the geological unit (ug) of the drillhole data
compColumns = [(var, float), (ugVarComp, float)]
compPath = '../../GP_Data/cy17_spc_assays_rl6_entry.csv'
# compPath = '../../GP_Data/cy17_spc_au_rl6_entry.csv'
def run():
blockModel, composites, ellipsoid = getObjects()
ugs = [10, 20, 30, 40, 50, 51, 60, 70, 71, 80]
for ug in ugs:
model = getModel(ug)
if model is not None:
blocks = blockModel.applyFilter('"%s" == %d' % (ugVarBlock, ug))
comps = composites.applyFilter('"%s" == %d' % (ugVarComp, ug))
estimate(blocks, comps, ellipsoid, model)
exportBlockModel(blockModel)
def getModel(ug):
    # variogram model
if ug == 10:
nugget = 0.250
s1 = Structure(Structure.EXPONENTIAL, 0.480, Ellipsoid(19, 19, 19, 0, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.270, Ellipsoid(436, 436, 436, 0, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 20:
nugget = 0.250
s1 = Structure(Structure.EXPONENTIAL, 0.370, Ellipsoid(16, 22, 5, 20, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.380, Ellipsoid(177, 97, 27, 20, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 30:
nugget = 0.290
s1 = Structure(Structure.SPHERIC, 0.320, Ellipsoid(47, 103, 20, 30, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(601, 500, 32, 30, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 40:
nugget = 0.220
s1 = Structure(Structure.SPHERIC, 0.420, Ellipsoid(55, 20, 11, 40, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.360, Ellipsoid(447, 183, 26, 40, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 50:
nugget = 0.180
s1 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(16, 29, 11, 40, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.430, Ellipsoid(144, 93, 145, 40, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 51:
nugget = 0.140
s1 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(14, 37, 28, 35, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.470, Ellipsoid(343, 183, 125, 35, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 60:
nugget = 0.150
s1 = Structure(Structure.SPHERIC, 0.550, Ellipsoid(14.8, 10.3, 11.9, 10, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.300, Ellipsoid(954.5, 98.9, 16337.9, 10, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 70:
nugget = 0.150
s1 = Structure(Structure.EXPONENTIAL, 0.444, Ellipsoid(18.6, 15.1, 18.1, 10, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.406, Ellipsoid(18.8, 14.9, 208.9, 10, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 71:
nugget = 0.200
s1 = Structure(Structure.EXPONENTIAL, 0.441, Ellipsoid(11.1, 7.9, 9.8, 20, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.359, Ellipsoid(143.7, 161.0, 3777.8, 20, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
return None
def estimate(blocks, composites, ellipsoid, model):
    # rotate the composites
rotatedPoints = rotateComposites(composites, ellipsoid.rotationMatrix)
    # build a dictionary to look up samples by their rotated coordinate
compositesByRotatedPoint = dict([(tuple(rotatedPoints[i]), composites[i])
for i in range(len(rotatedPoints))])
    # discretize the space
discretizedPoints = discretizePoints(rotatedPoints,
ellipsoid.major,
ellipsoid.medium,
ellipsoid.minor)
kriging = PointKriging(ellipsoid, model)
cap = 2
    print('Estimating block model:')
for block in blocks:
        # rotate the point to be estimated
rx, ry, rz = rotateBlock(block, ellipsoid.rotationMatrix)
        # get the composites near the centre of the block
points = ellipsoid.searchPointsInDiscretizedPoints((rx, ry, rz), discretizedPoints)
if len(points) > 0:
            # sort the points by distance to the block
points = sorted(points, key=lambda point: point[0])
inEllipsoid = []
for distance, rotatedPoint, movedPoint, octant in points:
composite = compositesByRotatedPoint[rotatedPoint]
inEllipsoid.append((distance, composite, octant))
            # select the samples that meet the requested criteria
selectedSamples = ellipsoid.selectSamples(inEllipsoid)
if len(selectedSamples) > 0:
                print('used {} samples'.format(len(selectedSamples)))
blockpoint = (block.x, block.y, block.z)
weights, variance = kriging.ordinary(selectedSamples, blockpoint)
value = 0
for i in range(len(selectedSamples)):
_, comp, _ = selectedSamples[i]
# capping
gradeComp = comp[var] if comp[var] <= cap else cap
                    value += gradeComp * weights[i]  # accumulate the weighted, capped grade
block.grade = value
def exportBlockModel(blockModel):
    # export the block model
outfile = 'modelo_estimado_sondaje.csv'
outfile = open(outfile, 'w')
outfile.write('x,y,z,grade\n')
for block in blockModel:
if hasattr(block, 'grade'):
line = block.x, block.y, block.z, block.grade
else:
line = block.x, block.y, block.z, -99
outfile.write("%f,%f,%f,%f\n" % line)
outfile.close()
def getObjects():
    # load the block model, the composites and the category script
blockModel = BlockModel(path=blockPath, x='midx', y='midy', z='midz', readBlocks=True)
# composites = Composites(path=compPath, holeid='dhid', middlex='midx', middley='midy', middlez='midz',
# from_='from', to_='to', columns=compColumns, readComposites=True)
composites = Composites(path=compPath, holeid='dhid', middlex='midx', middley='midy', middlez='midz',
columns=compColumns, readComposites=True)
major, medium, minor = 100, 100, 100
bearing, plunge, dip = 0, 0, 0
minSamples, maxSamples = 10, 100
minSamplesByOctant, maxSamplesByOctant = 1, 100
minOctantWithSamples, maxSamplesByDrillhole = 1, 100
ellipsoid = SearchEllipsoid(major=major, medium=medium, minor=minor, bearing=bearing, plunge=plunge, dip=dip,
minSamples=minSamples, maxSamples=maxSamples,
minSamplesByOctant=minSamplesByOctant, maxSamplesByOctant=maxSamplesByOctant,
minOctantWithSamples=minOctantWithSamples, maxSamplesByDrillhole=maxSamplesByDrillhole)
return blockModel, composites, ellipsoid
if __name__ == '__main__':
run()
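# Minimal numeric sketch of the weighted-sum step inside estimate() above
# (standalone, made-up weights and grades; not part of the production flow):
#
#     weights = [0.5, 0.3, 0.2]   # as returned by kriging.ordinary()
#     grades = [1.2, 0.8, 2.5]    # composite grades before capping
#     cap = 2
#     estimate = sum(w * min(g, cap) for w, g in zip(weights, grades))
#     # -> 0.5*1.2 + 0.3*0.8 + 0.2*2.0 = 1.24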
|
nilq/baby-python
|
python
|
"""This module demonstrates usage of if-else statements, while loop and break."""
def calculate_grade(grade):
"""Function that calculates final grades based on points earned."""
if grade >= 90:
if grade == 100:
return 'A+'
return 'A'
if grade >= 80:
return 'B'
if grade >= 70:
return 'C'
return 'F'
if __name__ == '__main__':
while True:
grade_str = input('Number of points (<ENTER> for END): ')
if len(grade_str) == 0:
break
points = int(grade_str)
print(calculate_grade(points))
print('Good Bye!')
|
nilq/baby-python
|
python
|
from pinata.response import PinataResponse
from pinata.session import PinataAPISession
class PinataClient:
def __init__(self, session: PinataAPISession, api_namespace: str):
self.session = session
self._prefix = api_namespace
def _post(self, uri, *args, **kwargs) -> PinataResponse:
return self.session.post(self._uri(uri), *args, **kwargs)
def _get(self, uri, *args, **kwargs) -> PinataResponse:
return self.session.get(self._uri(uri), *args, **kwargs)
def _delete(self, uri, *args, **kwargs) -> PinataResponse:
return self.session.delete(self._uri(uri), *args, **kwargs)
def _uri(self, uri: str) -> str:
return f"/{self._prefix}/{uri}"
__all__ = ["PinataClient"]
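# Usage sketch (hedged: `PinningClient` and the "unpin/..." endpoint below are
# illustrative assumptions, not necessarily Pinata's actual API surface):
#
#     class PinningClient(PinataClient):
#         def unpin(self, content_hash: str) -> PinataResponse:
#             return self._delete(f"unpin/{content_hash}")
#
#     client = PinningClient(session, "pinning")
#     response = client.unpin("Qm...")  # issues DELETE /pinning/unpin/Qm...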
|
nilq/baby-python
|
python
|
import music_trees as mt
from music_trees.tree import MusicTree
from copy import deepcopy
import random
from tqdm import tqdm
NUM_TAXONOMIES = 10
NUM_SHUFFLES = 1000
output_dir = mt.ASSETS_DIR / 'taxonomies'
output_dir.mkdir(exist_ok=True)
target_tree = mt.utils.data.load_entry(
mt.ASSETS_DIR / 'taxonomies' / 'deeper-mdb.yaml', format='yaml')
target_tree = MusicTree.from_taxonomy(target_tree)
def scramble_tree(tree: MusicTree):
"scramble a class tree"
# first, copy the tree
tree = deepcopy(tree)
# shuffle many times
for _ in tqdm(list(range(NUM_SHUFFLES))):
# get all of the leaves twice
A = tree.leaves()
B = tree.leaves()
# scramble one of them
random.shuffle(B)
# swap a and b for all A and B
for an, bn in zip(A, B):
tree.swap_leaves(an, bn)
return tree
def export_tree(tree: MusicTree, fp: str):
mt.utils.data.save_entry(tree._2dict()['root'], fp, format='yaml')
if __name__ == "__main__":
for i in range(NUM_TAXONOMIES):
t = scramble_tree(target_tree)
fp = output_dir / f'scrambled-{i}'
export_tree(t, fp)
|
nilq/baby-python
|
python
|
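# Render the first 64 KiB of a binary file (after skipping an 8-byte header)
# as a 256x256 grayscale image, one byte per pixel, row-major.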
from PIL import Image
import sys
im = Image.new("L", (256, 256))
c = 0
with open(sys.argv[1], "rb") as f:
f.read(8)
byte = f.read(1)
    while c < 65536 and byte:
        im.putpixel((c % 256, c // 256), byte[0])
        byte = f.read(1)
        c = c + 1
im.save("fog.png")
|
nilq/baby-python
|
python
|
from ..std.index import *
from .math3d import *
from .math2d import *
from ..df.blizzardj import bj_mapInitialPlayableArea
class TerrainGrid(Rectangle):
grids = []
_loc = None
def __init__(self,r,sampling=8):
Rectangle.__init__(self,GetRectMinX(r),GetRectMinY(r),GetRectMaxX(r),GetRectMaxY(r))
TerrainGrid.grids.append(self)
self.sampling = sampling
_l = TerrainGrid._loc
_zgrid=None
"""[[luacode]]
local _zgrid = {}
"""
        for X in range(math.floor((self.maxx - self.minx) / sampling)):
"""[[luacode]]
_zgrid[X] = {}
"""
            for Y in range(math.floor((self.maxy - self.miny) / sampling)):
MoveLocation(_l, X * sampling + self.minx, Y * sampling + self.miny)
"""[[luacode]]
_zgrid[X][Y] = GetLocationZ(_l)
"""
self.grid = _zgrid
def get_z(self,x,y):
X = math.floor((x - self.minx) / self.sampling)
Y = math.floor((y - self.miny) / self.sampling)
return self.grid[X][Y]
@staticmethod
def z(x,y):
for g in TerrainGrid.grids:
if Vector2(x,y,True) in g:
return g.get_z(x,y)
MoveLocation(TerrainGrid._loc,x,y)
return GetLocationZ(TerrainGrid._loc)
@staticmethod
def _init():
TerrainGrid._loc = Location(0,0)
AddScriptHook(TerrainGrid._init,MAIN_BEFORE)
def _ft(x,y,temp=False):
z = TerrainGrid.z(x,y)
if IsTerrainPathable(x, y, PATHING_TYPE_WALKABILITY): z += 2000.0
return Vector3(x,y,z,temp)
Vector3.from_terrain = _ft
|
nilq/baby-python
|
python
|
from collections.abc import Callable
def update( # <1>
probe: Callable[[], float], # <2>
display: Callable[[float], None] # <3>
) -> None:
temperature = probe()
# imagine lots of control code here
display(temperature)
def probe_ok() -> int: # <4>
return 42
def display_wrong(temperature: int) -> None: # <5>
print(hex(temperature))
update(probe_ok, display_wrong) # type error # <6>
def display_ok(temperature: complex) -> None: # <7>
print(temperature)
update(probe_ok, display_ok) # OK # <8>
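# Why the checker behaves this way: Callable is contravariant in its
# parameter types and covariant in its return type. display_ok accepts any
# complex value, hence every float, so it satisfies Callable[[float], None];
# display_wrong only accepts int, so it does not. Likewise probe_ok's int
# return value is acceptable wherever a float is expected.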
|
nilq/baby-python
|
python
|
# pylint: disable=invalid-name
# pylint: disable=too-many-locals
# pylint: disable=too-many-arguments
# pylint: disable=too-many-statements
# pylint: disable=unbalanced-tuple-unpacking
# pylint: disable=consider-using-f-string
# pylint: disable=too-many-lines
"""
A module for finding M² values for a laser beam.
Full documentation is available at <https://laserbeamsize.readthedocs.io>
Start with necessary imports::
>>>> import numpy as np
>>>> import laserbeamsize as lbs
Finding the beam waist size, location, and M² for a beam is straightforward::
>>>> lambda0 = 632.8e-9 # m
>>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770])
>>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397])
>>>> lbs.M2_report(z * 1e-3, 2 * r * 1e-6, lambda0)
A graphic of the fit to diameters can be created by::
>>>> lbs.M2_diameter_plot(z * 1e-3, 2 * r * 1e-6, lambda0)
>>>> plt.show()
A graphic of the radial fit can be created by::
>>>> lbs.M2_radius_plot(z * 1e-3, 2 * r * 1e-6, lambda0)
>>>> plt.show()
"""
import scipy.optimize
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
__all__ = ('z_rayleigh',
'beam_radius',
'magnification',
'image_distance',
'curvature',
'divergence',
'gouy_phase',
'focused_diameter',
'beam_parameter_product',
'artificial_to_original',
'M2_fit',
'M2_report',
'M2_diameter_plot',
'M2_radius_plot',
'M2_focus_plot'
)
def z_rayleigh(w0, lambda0, M2=1):
"""
Return the Rayleigh distance for a Gaussian beam.
Args:
        w0: minimum beam radius [m]
        lambda0: wavelength of light [m]
        M2: beam propagation factor [-]
Returns:
z: axial distance from focus that irradiance has dropped 50% [m]
"""
return np.pi * w0**2 / lambda0 / M2
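# Worked example (illustrative numbers): a w0 = 100 µm waist at
# lambda0 = 632.8 nm gives z_rayleigh(100e-6, 632.8e-9) ≈ 49.6 mm.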
def beam_radius(w0, lambda0, z, z0=0, M2=1):
"""
Return the beam radius at an axial location.
Args:
w0: minimum beam radius [m]
lambda0: wavelength of light [m]
z: axial location of desired beam radius [m]
z0: axial location of beam waist [m]
M2: beam propagation factor [-]
Returns:
r: beam radius at axial position [m]
"""
zz = (z - z0) / z_rayleigh(w0, lambda0, M2)
return w0 * np.sqrt(1 + zz**2)
def magnification(w0, lambda0, s, f, M2=1):
"""
Return the magnification of a Gaussian beam.
If the beam waist is before the lens, then the distance s
will be negative, i.e. if it is at the front focus of the lens (s=-f).
The new beam waist will be `m * w0` and the new Rayleigh
distance will be `m**2 * zR`
Args:
        w0: minimum beam radius [m]
        lambda0: wavelength of light [m]
        s: distance of beam waist to lens [m]
        f: focal distance of lens [m]
        M2: beam propagation factor [-]
Returns:
m: magnification [-]
"""
zR2 = z_rayleigh(w0, lambda0, M2)**2
return f / np.sqrt((s + f)**2 + zR2)
def curvature(w0, lambda0, z, z0=0, M2=1):
"""
Calculate the radius of curvature of a Gaussian beam.
The curvature will be a maximum at the Rayleigh distance and
it will be infinite at the beam waist.
Args:
w0: minimum beam radius [m]
lambda0: wavelength of light [m]
        z: axial position along beam [m]
        z0: axial position of the beam waist [m]
M2: beam propagation factor [-]
Returns:
R: radius of curvature of field at z [m]
"""
zR2 = z_rayleigh(w0, lambda0, M2)**2
return (z - z0) + zR2 / (z - z0)
def divergence(w0, lambda0, M2=1):
"""
Calculate the full angle of divergence of a Gaussian beam.
Args:
w0: minimum beam radius [m]
lambda0: wavelength of light [m]
M2: beam propagation factor [-]
Returns:
theta: divergence of beam [radians]
"""
return 2 * w0 / z_rayleigh(w0, lambda0, M2)
def gouy_phase(w0, lambda0, z, z0=0):
"""
Calculate the Gouy phase of a Gaussian beam.
Args:
w0: minimum beam radius [m]
lambda0: wavelength of light [m]
z: axial position along beam [m]
z0: axial position of beam waist [m]
Returns:
phase: Gouy phase at axial position [radians]
"""
zR = z_rayleigh(w0, lambda0)
return -np.arctan2(z - z0, zR)
def focused_diameter(f, lambda0, d, M2=1):
"""
Diameter of diffraction-limited focused beam.
see eq 6b from Roundy, "Current Technology of Beam Profile Measurements"
in Laser Beam Shaping: Theory and Techniques by Dickey, 2000
Args:
f: focal length of lens [m]
lambda0: wavelength of light [m]
d: diameter of limiting aperture [m]
M2: beam propagation factor [-]
Returns:
d: diffraction-limited beam diameter [m]
"""
return 4 * M2**2 * lambda0 * f / (np.pi * d)
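# Worked example (illustrative numbers): an M2 = 1 beam filling a d = 10 mm
# aperture, focused by an f = 100 mm lens at 632.8 nm, gives
# focused_diameter(0.1, 632.8e-9, 0.01) ≈ 8.1 µm.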
def beam_parameter_product(Theta, d0, Theta_std=0, d0_std=0):
"""
Find the beam parameter product (BPP).
    Better beam quality is associated with lower BPP values. The best
(smallest) BPP is λ / π and corresponds to a diffraction-limited Gaussian beam.
Args:
Theta: full beam divergence angle [radians]
d0: beam waist diameter [m]
Theta_std: std. dev. of full beam divergence angle [radians]
d0_std: std. dev. of beam waist diameter [m]
Returns:
BPP: Beam parameter product [m * radian]
BPP_std: standard deviation of beam parameter product [m * radian]
"""
BPP = Theta * d0 / 4
BPP_std = BPP * np.sqrt((Theta_std / Theta)**2 + (d0_std / d0)**2)
return BPP, BPP_std
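# Worked example (illustrative numbers): Theta = 4.03 mrad and d0 = 200 µm
# give BPP = Theta * d0 / 4 ≈ 0.20 mm·mrad, essentially the diffraction
# limit lambda0 / pi ≈ 0.20 mm·mrad at 632.8 nm.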
def image_distance(w0, lambda0, s, f, M2=1):
"""
Return the image location of a Gaussian beam.
The default case is when the beam waist is located at
the front focus of the lens (s=-f).
Args:
s: distance of beam waist to lens [m]
f: focal distance of lens [m]
w0: minimum beam radius [m]
lambda0: wavelength of light [m]
M2: beam propagation factor [-]
Returns:
z: location of new beam waist [m]
"""
zR2 = z_rayleigh(w0, lambda0, M2)**2
return f * (s * f + s * s + zR2) / ((f + s)**2 + zR2)
def _abc_fit(z, d, lambda0):
"""
Return beam parameters for beam diameter measurements.
Follows ISO 11146-1 section 9 and uses the standard `polyfit` routine
in `numpy` to find the coefficients `a`, `b`, and `c`.
d(z)**2 = a + b * z + c * z**2
These coefficients are used to determine the beam parameters using
equations 25-29 from ISO 11146-1.
Unfortunately, standard error propagation fails to accurately determine
the standard deviations of these parameters. Therefore the error calculation
lines are commented out and only the beam parameters are returned.
Args:
z: axial position of beam measurement [m]
        d: beam diameter [m]
        lambda0: wavelength of the laser [m]
Returns:
d0: beam waist diameter [m]
z0: axial location of beam waist [m]
M2: beam propagation parameter [-]
Theta: full beam divergence angle [radians]
zR: Rayleigh distance [m]
"""
nlfit, _nlpcov = np.polyfit(z, d**2, 2, cov=True)
# unpack fitting parameters
c, b, a = nlfit
z0 = -b / (2 * c)
Theta = np.sqrt(c)
disc = np.sqrt(4 * a * c - b * b) / 2
M2 = np.pi / 4 / lambda0 * disc
d0 = disc / np.sqrt(c)
zR = disc / c
params = [d0, z0, Theta, M2, zR]
# unpack uncertainties in fitting parameters from diagonal of covariance matrix
# c_std, b_std, a_std = [np.sqrt(_nlpcov[j, j]) for j in range(nlfit.size)]
# z0_std = z0 * np.sqrt(b_std**2/b**2 + c_std**2/c**2)
# d0_std = np.sqrt((4 * c**2 * a_std)**2 + (2 * b * c * b_std)**2 \
# + (b**2 * c_std)**2) / (8 * c**2 * d0)
# Theta_std = c_std/2/np.sqrt(c)
# zR_std = np.sqrt(4 * c**4 * a_std**2 + b**2 * c**2 * b_std**2 + \
# (b**2-2 * a * c)**2 * c_std**2)/(4 * c**3) / zR
# M2_std = np.pi**2 * np.sqrt(4 * c**2 * a_std**2 + b**2 * b_std**2 + \
# 4 * a**2 * c_std**2)/(64 * lambda0**2) / M2
# errors = [d0_std, z0_std, M2_std, Theta_std, zR_std]
return params
def _beam_fit_fn_(z, d0, z0, Theta):
"""Fitting function for d0, z0, and Theta."""
return np.sqrt(d0**2 + (Theta * (z - z0))**2)
def _beam_fit_fn_2(z, d0, Theta):
"""Fitting function for d0 and Theta."""
return np.sqrt(d0**2 + (Theta * z)**2)
def _beam_fit_fn_3(z, z0, Theta):
"""Fitting function for z0 and Theta."""
return np.abs(Theta * (z - z0))
def _beam_fit_fn_4(z, Theta):
"""Fitting function for just Theta."""
return np.abs(Theta * z)
def basic_beam_fit(z, d, lambda0, z0=None, d0=None):
"""
Return the hyperbolic fit to the supplied diameters.
Follows ISO 11146-1 section 9 but `a`, `b`, and `c` have been
replaced by beam parameters `d0`, `z0`, and Theta. The equation
for the beam diameter `d(z)` is
d(z)**2 = d0**2 + Theta**2 * (z-z0)**2
A non-linear curve fit is done to determine the beam parameters and the
standard deviations of those parameters. The beam parameters are returned
in one array and the errors in a separate array::
d0: beam waist diameter [m]
z0: axial location of beam waist [m]
Theta: full beam divergence angle [radians]
M2: beam propagation parameter [-]
zR: Rayleigh distance [m]
Args:
z: array of axial position of beam measurements [m]
d: array of beam diameters [m]
lambda0: wavelength of the laser [m]
Returns:
params: [d0, z0, Theta, M2, zR]
errors: array with standard deviations of above values
"""
# approximate answer
i = np.argmin(d)
d0_guess = d[i]
z0_guess = z[i]
# fit data using SciPy's curve_fit() algorithm
if z0 is None:
if d0 is None:
i = np.argmax(abs(z - z0_guess))
theta_guess = abs(d[i] / (z[i] - z0_guess))
p0 = [d0_guess, z0_guess, theta_guess]
nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_, z, d, p0=p0)
d0, z0, Theta = nlfit
d0_std, z0_std, Theta_std = [np.sqrt(nlpcov[j, j]) for j in range(nlfit.size)]
else:
i = np.argmax(abs(z - z0_guess))
theta_guess = abs(d[i] / (z[i] - z0_guess))
p0 = [z0_guess, theta_guess]
dd = np.sqrt(d**2 - d0**2)
nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_3, z, dd, p0=p0)
z0, Theta = nlfit
z0_std, Theta_std = [np.sqrt(nlpcov[j, j]) for j in range(nlfit.size)]
d0_std = 0
else:
i = np.argmax(abs(z - z0))
theta_guess = abs(d[i] / (z[i] - z0))
if d0 is None:
p0 = [d0_guess, theta_guess]
nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_2, z - z0, d, p0=p0)
d0, Theta = nlfit
d0_std, Theta_std = [np.sqrt(nlpcov[j, j]) for j in range(nlfit.size)]
z0_std = 0
else:
p0 = [theta_guess]
dd = np.sqrt(d**2 - d0**2)
nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_4, z - z0, dd, p0=p0)
Theta = nlfit[0]
Theta_std = np.sqrt(nlpcov[0, 0])
z0_std = 0
d0_std = 0
# divergence and Rayleigh range of Gaussian beam
Theta0 = 4 * lambda0 / (np.pi * d0)
zR = np.pi * d0**2 / (4 * lambda0)
M2 = Theta / Theta0
zR = np.pi * d0**2 / (4 * lambda0 * M2)
M2_std = M2 * np.sqrt((Theta_std / Theta)**2 + (d0_std / d0)**2)
zR_std = zR * np.sqrt((M2_std / M2)**2 + (2 * d0_std / d0)**2)
params = [d0, z0, Theta, M2, zR]
errors = [d0_std, z0_std, Theta_std, M2_std, zR_std]
return params, errors
def max_index_in_focal_zone(z, zone):
"""Return index farthest from focus in inner zone."""
_max = -1e32
imax = None
for i, zz in enumerate(z):
if zone[i] == 1:
if _max < zz:
_max = zz
imax = i
return imax
def min_index_in_outer_zone(z, zone):
"""Return index of measurement closest to focus in outer zone."""
_min = 1e32
imin = None
for i, zz in enumerate(z):
if zone[i] == 2:
if zz < _min:
_min = zz
imin = i
return imin
def M2_fit(z, d, lambda0, strict=False, z0=None, d0=None):
"""
Return the hyperbolic fit to the supplied diameters.
Follows ISO 11146-1 section 9 but `a`, `b`, and `c` have been
replaced by beam parameters `d0`, `z0`, and Theta. The equation
for the beam diameter `d(z)` is
d(z)**2 = d0**2 + Theta**2 * (z - z0)**2
A non-linear curve fit is done to determine the beam parameters and the
standard deviations of those parameters. The beam parameters are returned
in one array and the errors in a separate array::
d0: beam waist diameter [m]
z0: axial location of beam waist [m]
Theta: full beam divergence angle [radians]
M2: beam propagation parameter [-]
zR: Rayleigh distance [m]
When `strict==True`, an estimate is made for the location of the beam focus
and the Rayleigh distance. These values are then used to divide the
measurements into three zones::
* those within one Rayleigh distance of the focus,
* those between 1 and 2 Rayleigh distances, and
* those beyond two Rayleigh distances.
    Measurements are used or unused depending on whether they comply with a strict
reading of the ISO 11146-1 standard which requires::
... measurements at at least 10 different z positions shall be taken.
Approximately half of the measurements shall be distributed within
one Rayleigh length on either side of the beam waist, and approximately
half of them shall be distributed beyond two Rayleigh lengths
from the beam waist.
Args:
z: array of axial position of beam measurements [m]
d: array of beam diameters [m]
lambda0: wavelength of the laser [m]
strict: (optional) boolean for strict usage of ISO 11146
z0: (optional) location of beam waist [m]
d0: (optional) diameter of beam waist [m]
Returns:
params: [d0, z0, Theta, M2, zR]
errors: [d0_std, z0_std, Theta_std, M2_std, zR_std]
used: boolean array indicating if data point is used
"""
used = np.full_like(z, True, dtype=bool)
params, errors = basic_beam_fit(z, d, lambda0, z0=z0, d0=d0)
if not strict:
return params, errors, used
z0 = params[1]
zR = params[4]
# identify zones (0=unused, 1=focal region, 2=outer region)
zone = np.zeros_like(z)
for i, zz in enumerate(z):
if abs(zz - z0) <= 1.01 * zR:
zone[i] = 1
if 1.99 * zR <= abs(zz - z0):
zone[i] = 2
# count points in each zone
n_focal = np.sum(zone == 1)
n_outer = np.sum(zone == 2)
if n_focal + n_outer < 10 or n_focal < 4 or n_outer < 4:
print("Invalid distribution of measurements for ISO 11146")
print("%d points within 1 Rayleigh distance" % n_focal)
print("%d points greater than 2 Rayleigh distances" % n_outer)
return params, errors, used
# mark extra points in outer zone closest to focus as unused
extra = n_outer - n_focal
if n_focal == 4:
extra = n_outer - 6
for _ in range(extra):
zone[min_index_in_outer_zone(abs(z - z0), zone)] = 0
    # mark extra points in focal zone farthest from focus as unused
    extra = n_focal - n_outer
    if n_outer == 4:
        extra = n_focal - 6
    for _ in range(extra):
        zone[max_index_in_focal_zone(abs(z - z0), zone)] = 0
# now find beam parameters with 50% focal and 50% outer zone values
used = zone != 0
dd = d[used]
zz = z[used]
params, errors = basic_beam_fit(zz, dd, lambda0, z0=z0, d0=d0)
return params, errors, used
def M2_string(params, errors):
"""
Return string describing a single set of beam measurements.
    Args:
        params: array of beam parameters [d0, z0, Theta, M2, zR]
        errors: array of standard deviations of those parameters
Returns:
s: formatted string suitable for printing.
"""
d0, z0, Theta, M2, zR = params
d0_std, z0_std, Theta_std, M2_std, zR_std = errors
BPP, BPP_std = beam_parameter_product(Theta, d0, Theta_std, d0_std)
s = ''
s += " M^2 = %.2f ± %.2f\n" % (M2, M2_std)
s += "\n"
s += " d_0 = %.0f ± %.0f µm\n" % (d0 * 1e6, d0_std * 1e6)
s += " w_0 = %.0f ± %.0f µm\n" % (d0 / 2 * 1e6, d0_std / 2 * 1e6)
s += "\n"
s += " z_0 = %.0f ± %.0f mm\n" % (z0 * 1e3, z0_std * 1e3)
s += " z_R = %.0f ± %.0f mm\n" % (zR * 1e3, zR_std * 1e3)
s += "\n"
s += " Theta = %.2f ± %.2f mrad\n" % (Theta * 1e3, Theta_std * 1e3)
s += "\n"
s += " BPP = %.2f ± %.2f mm mrad\n" % (BPP * 1e6, BPP_std * 1e6)
return s
def artificial_to_original(params, errors, f, hiatus=0):
"""
Convert artificial beam parameters to original beam parameters.
ISO 11146-1 section 9 equations are used to retrieve the original beam
parameters from parameters measured for an artificial waist
created by focusing the beam with a lens.
M2 does not change.
Ideally, the waist position would be relative to the rear principal
plane of the lens and the original beam waist position would be corrected
by the hiatus between the principal planes of the lens.
The beam parameters are in an array `[d0,z0,Theta,M2,zR]` ::
d0: beam waist diameter [m]
z0: axial location of beam waist [m]
Theta: full beam divergence angle [radians]
M2: beam propagation parameter [-]
zR: Rayleigh distance [m]
The errors that are returned are not quite right at the moment.
Args:
params: array of artificial beam parameters
errors: array with std dev of above parameters
f: focal length of lens [m]
hiatus: distance between principal planes of focusing lens [m]
Returns:
params: array of original beam parameters (without lens)
errors: array of std deviations of above parameters
"""
art_d0, art_z0, art_Theta, M2, art_zR = params
art_d0_std, art_z0_std, art_Theta_std, M2_std, art_zR_std = errors
x2 = art_z0 - f
V = f / np.sqrt(art_zR**2 + x2**2)
orig_d0 = V * art_d0
orig_d0_std = V * art_d0_std
orig_z0 = V**2 * x2 + f - hiatus
orig_z0_std = V**2 * art_z0_std
orig_zR = V**2 * art_zR
orig_zR_std = V**2 * art_zR_std
orig_Theta = art_Theta / V
orig_Theta_std = art_Theta_std / V
o_params = [orig_d0, orig_z0, orig_Theta, M2, orig_zR]
o_errors = [orig_d0_std, orig_z0_std, orig_Theta_std, M2_std, orig_zR_std]
return o_params, o_errors
def _M2_report(z, d, lambda0, f=None, strict=False, z0=None, d0=None):
"""
Return string describing a single set of beam measurements.
Args:
z: array of axial position of beam measurements [m]
d: array of beam diameters [m]
        lambda0: wavelength of the laser [m]
        f: (optional) focal length of lens [m]
        strict: (optional) boolean for strict usage of ISO 11146
        z0: (optional) location of beam waist [m]
        d0: (optional) diameter of beam waist [m]
Returns:
s: formatted string suitable for printing.
"""
params, errors, _ = M2_fit(z, d, lambda0, strict, z0=z0, d0=d0)
if f is None:
s = "Beam propagation parameters\n"
s += M2_string(params, errors)
return s
s = "Beam propagation parameters for the focused beam\n"
s += M2_string(params, errors)
o_params, o_errors = artificial_to_original(params, errors, f)
s += "\nBeam propagation parameters for the laser beam\n"
s += M2_string(o_params, o_errors)
return s
def M2_report(z, dx, lambda0, dy=None, f=None, strict=False, z0=None, d0=None):
"""
Return string describing a one or more sets of beam measurements.
Example::
>>>> import numpy as np
>>>> import laserbeamsize as lbs
>>>> lambda0 = 632.8e-9 # meters
>>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770])
>>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397])
>>>> s = lbs.M2_report(z * 1e-3, 2 * r * 1e-6, lambda0)
>>>> print(s)
Args:
z: array of axial position of beam measurements [m]
dx: array of beam diameters for semi-major axis [m]
lambda0: wavelength of the laser [m]
dy: (optional) array of beam diameters for semi-minor axis [m]
f: (optional) focal length of lens [m]
strict: (optional) boolean for strict usage of ISO 11146
z0: (optional) location of beam waist [m]
d0: (optional) diameter of beam waist [m]
Returns:
s: formatted string suitable for printing.
"""
if dy is None:
s = _M2_report(z, dx, lambda0, f=f, strict=strict, z0=z0, d0=d0)
return s
params, errors, _ = M2_fit(z, dx, lambda0, strict=strict, z0=z0, d0=d0)
d0x, z0x, Thetax, M2x, zRx = params
d0x_std, z0x_std, Thetax_std, M2x_std, zRx_std = errors
params, errors, _ = M2_fit(z, dy, lambda0, strict=strict, z0=z0, d0=d0)
d0y, z0y, Thetay, M2y, zRy = params
d0y_std, z0y_std, Thetay_std, M2y_std, zRy_std = errors
z0 = (z0x + z0y) / 2
z0_std = np.sqrt(z0x_std**2 + z0y_std**2)
d0 = (d0x + d0y) / 2
d0_std = np.sqrt(d0x_std**2 + d0y_std**2)
zR = (zRx + zRy) / 2
zR_std = np.sqrt(zRx_std**2 + zRy_std**2)
Theta = (Thetax + Thetay) / 2
Theta_std = np.sqrt(Thetax_std**2 + Thetay_std**2)
M2 = np.sqrt(M2x * M2y)
M2_std = np.sqrt(M2x_std**2 + M2y_std**2)
BPP, BPP_std = beam_parameter_product(Theta, d0, Theta_std, d0_std)
BPPx, BPPx_std = beam_parameter_product(Thetax, d0x, Thetax_std, d0x_std)
BPPy, BPPy_std = beam_parameter_product(Thetay, d0y, Thetay_std, d0y_std)
tag = ''
if f is not None:
tag = " of the focused beam"
s = "Beam propagation parameters derived from hyperbolic fit\n"
s += "Beam Propagation Ratio%s\n" % tag
s += " M2 = %.2f ± %.2f\n" % (M2, M2_std)
s += " M2x = %.2f ± %.2f\n" % (M2x, M2x_std)
s += " M2y = %.2f ± %.2f\n" % (M2y, M2y_std)
s += "Beam waist diameter%s\n" % tag
s += " d0 = %.0f ± %.0f µm\n" % (d0 * 1e6, d0_std * 1e6)
s += " d0x = %.0f ± %.0f µm\n" % (d0x * 1e6, d0x_std * 1e6)
s += " d0y = %.0f ± %.0f µm\n" % (d0y * 1e6, d0y_std * 1e6)
s += "Beam waist location%s\n" % tag
s += " z0 = %.0f ± %.0f mm\n" % (z0 * 1e3, z0_std * 1e3)
s += " z0x = %.0f ± %.0f mm\n" % (z0x * 1e3, z0x_std * 1e3)
s += " z0y = %.0f ± %.0f mm\n" % (z0y * 1e3, z0y_std * 1e3)
s += "Rayleigh Length%s\n" % tag
s += " zR = %.0f ± %.0f mm\n" % (zR * 1e3, zR_std * 1e3)
s += " zRx = %.0f ± %.0f mm\n" % (zRx * 1e3, zRx_std * 1e3)
s += " zRy = %.0f ± %.0f mm\n" % (zRy * 1e3, zRy_std * 1e3)
s += "Divergence Angle%s\n" % tag
s += " theta = %.2f ± %.2f milliradians\n" % (Theta * 1e3, Theta_std * 1e3)
s += " theta_x = %.2f ± %.2f milliradians\n" % (Thetax * 1e3, Thetax_std * 1e3)
s += " theta_y = %.2f ± %.2f milliradians\n" % (Thetay * 1e3, Thetay_std * 1e3)
s += "Beam parameter product%s\n" % tag
s += " BPP = %.2f ± %.2f mm * mrad\n" % (BPP * 1e6, BPP_std * 1e6)
s += " BPP_x = %.2f ± %.2f mm * mrad\n" % (BPPx * 1e6, BPPx_std * 1e6)
s += " BPP_y = %.2f ± %.2f mm * mrad\n" % (BPPy * 1e6, BPPy_std * 1e6)
if f is None:
return s
    # transform the focused-beam parameters back to those of the original
    # laser beam, mirroring artificial_to_original() for each axis
    x2 = z0x - f
    y2 = z0y - f
    r2 = z0 - f
    Vx = f / np.sqrt(zRx**2 + x2**2)
    Vy = f / np.sqrt(zRy**2 + y2**2)
    V = f / np.sqrt(zR**2 + r2**2)
    d0x, d0x_std = Vx * d0x, Vx * d0x_std
    d0y, d0y_std = Vy * d0y, Vy * d0y_std
    d0, d0_std = V * d0, V * d0_std
    z0x, z0x_std = Vx**2 * x2 + f, Vx**2 * z0x_std
    z0y, z0y_std = Vy**2 * y2 + f, Vy**2 * z0y_std
    z0, z0_std = V**2 * r2 + f, V**2 * z0_std
    s += "\nBeam propagation parameters for the laser beam\n"
    s += "Beam waist diameter\n"
    s += "        d0 = %.0f ± %.0f µm\n" % (d0 * 1e6, d0_std * 1e6)
    s += "       d0x = %.0f ± %.0f µm\n" % (d0x * 1e6, d0x_std * 1e6)
    s += "       d0y = %.0f ± %.0f µm\n" % (d0y * 1e6, d0y_std * 1e6)
    s += "Beam waist location\n"
    s += "        z0 = %.0f ± %.0f mm\n" % (z0 * 1e3, z0_std * 1e3)
    s += "       z0x = %.0f ± %.0f mm\n" % (z0x * 1e3, z0x_std * 1e3)
    s += "       z0y = %.0f ± %.0f mm\n" % (z0y * 1e3, z0y_std * 1e3)
    return s
def _fit_plot(z, d, lambda0, strict=False, z0=None, d0=None):
"""
Plot beam diameters and ISO 11146 fit.
Args:
z: array of axial position of beam measurements [m]
d: array of beam diameters [m]
lambda0: wavelength of the laser [m]
Returns:
residuals: array with differences between fit and data
z0: location of focus
zR: Rayleigh distance for beam
"""
params, errors, used = M2_fit(z, d, lambda0, strict=strict, z0=z0, d0=d0)
unused = np.logical_not(used)
d0, z0, Theta, M2, zR = params
d0_std, z0_std, Theta_std, M2_std, zR_std = errors
# fitted line
zmin = min(np.min(z), z0 - 4 * zR)
zmax = max(np.max(z), z0 + 4 * zR)
# plt.xlim(zmin, zmax)
z_fit = np.linspace(zmin, zmax)
# d_fit = np.sqrt(d0**2 + (Theta * (z_fit - z0))**2)
# plt.plot(z_fit * 1e3, d_fit * 1e6, ':k')
d_fit_lo = np.sqrt((d0 - d0_std)**2 + ((Theta - Theta_std) * (z_fit - z0))**2)
d_fit_hi = np.sqrt((d0 + d0_std)**2 + ((Theta + Theta_std) * (z_fit - z0))**2)
plt.fill_between(z_fit * 1e3, d_fit_lo * 1e6, d_fit_hi * 1e6, color='red', alpha=0.5)
# show perfect gaussian caustic when unphysical M2 arises
if M2 < 1:
Theta00 = 4 * lambda0 / (np.pi * d0)
d_00 = np.sqrt(d0**2 + (Theta00 * (z_fit - z0))**2)
plt.plot(z_fit * 1e3, d_00 * 1e6, ':k', lw=2, label="M²=1")
plt.legend(loc="lower right")
# data points
plt.plot(z[used] * 1e3, d[used] * 1e6, 'o', color='black', label='used')
plt.plot(z[unused] * 1e3, d[unused] * 1e6, 'ok', mfc='none', label='unused')
plt.xlabel('')
plt.ylabel('')
tax = plt.gca().transAxes
plt.text(0.05, 0.30, '$M^2$ = %.2f±%.2f ' % (M2, M2_std), transform=tax)
plt.text(0.05, 0.25, '$d_0$ = %.0f±%.0f µm' % (d0 * 1e6, d0_std * 1e6), transform=tax)
plt.text(0.05, 0.15, '$z_0$ = %.0f±%.0f mm' % (z0 * 1e3, z0_std * 1e3), transform=tax)
plt.text(0.05, 0.10, '$z_R$ = %.0f±%.0f mm' % (zR * 1e3, zR_std * 1e3), transform=tax)
Theta_ = Theta * 1e3
Theta_std_ = Theta_std * 1e3
plt.text(0.05, 0.05, r'$\Theta$ = %.2f±%.2f mrad' % (Theta_, Theta_std_), transform=tax)
plt.axvline(z0 * 1e3, color='black', lw=1)
plt.axvspan((z0 - zR) * 1e3, (z0 + zR) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0 - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0 + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3)
# plt.axhline(d0 * 1e6, color='black', lw=1)
# plt.axhspan((d0 + d0_std) * 1e6, (d0 - d0_std) * 1e6, color='red', alpha=0.1)
plt.title(r'$d^2(z) = d_0^2 + \Theta^2 (z - z_0)^2$')
if sum(z[unused]) > 0:
plt.legend(loc='upper right')
residuals = d - np.sqrt(d0**2 + (Theta * (z - z0))**2)
return residuals, z0, zR, used
def _M2_diameter_plot(z, d, lambda0, strict=False, z0=None, d0=None):
"""
Plot the fitted beam and the residuals.
Args:
z: array of axial position of beam measurements [m]
d: array of beam diameters [m]
lambda0: wavelength of the laser [m]
Returns:
nothing
"""
fig = plt.figure(1, figsize=(12, 8))
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[6, 2])
fig.add_subplot(gs[0])
residualsx, z0, zR, used = _fit_plot(z, d, lambda0, strict=strict, z0=z0, d0=d0)
unused = np.logical_not(used)
zmin = min(np.min(z), z0 - 4 * zR)
zmax = max(np.max(z), z0 + 4 * zR)
plt.ylabel('beam diameter (µm)')
plt.ylim(0, 1.1 * max(d) * 1e6)
fig.add_subplot(gs[1])
plt.plot(z * 1e3, residualsx * 1e6, "ro")
plt.plot(z[used] * 1e3, residualsx[used] * 1e6, 'ok', label='used')
plt.plot(z[unused] * 1e3, residualsx[unused] * 1e6, 'ok', mfc='none', label='unused')
plt.axhline(color="gray", zorder=-1)
plt.xlabel('axial position $z$ (mm)')
plt.ylabel('residuals (µm)')
plt.axvspan((z0 - zR) * 1e3, (z0 + zR) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0 - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0 + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3)
def M2_diameter_plot(z, dx, lambda0, dy=None, strict=False, z0=None, d0=None):
"""
Plot the semi-major and semi-minor beam fits and residuals.
Example::
>>>> import numpy as np
>>>> import laserbeamsize as lbs
>>>> lambda0 = 632.8e-9 # meters
>>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770])
>>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397])
>>>> lbs.M2_diameter_plot(z * 1e-3, 2 * r * 1e-6, lambda0)
>>>> plt.show()
Args:
z: array of axial position of beam measurements [m]
        dx: array of beam diameters for semi-major axis [m]
        lambda0: wavelength of the laser [m]
        dy: (optional) array of beam diameters for semi-minor axis [m]
Returns:
nothing
"""
if dy is None:
_M2_diameter_plot(z, dx, lambda0, strict=strict, z0=z0, d0=d0)
return
ymax = 1.1 * max(np.max(dx), np.max(dy)) * 1e6
# Create figure window to plot data
fig = plt.figure(1, figsize=(12, 8))
gs = matplotlib.gridspec.GridSpec(2, 2, height_ratios=[6, 2])
# semi-major axis plot
fig.add_subplot(gs[0, 0])
residualsx, z0x, zR, used = _fit_plot(z, dx, lambda0, strict=strict, z0=z0, d0=d0)
zmin = min(np.min(z), z0x - 4 * zR)
zmax = max(np.max(z), z0x + 4 * zR)
unused = np.logical_not(used)
plt.ylabel('beam diameter (µm)')
plt.title('Semi-major Axis Diameters')
plt.ylim(0, ymax)
# semi-major residuals
fig.add_subplot(gs[1, 0])
ax = plt.gca()
plt.plot(z[used] * 1e3, residualsx[used] * 1e6, 'ok', label='used')
plt.plot(z[unused] * 1e3, residualsx[unused] * 1e6, 'ok', mfc='none', label='unused')
plt.axhline(color="gray", zorder=-1)
plt.xlabel('axial position $z$ (mm)')
plt.ylabel('residuals (µm)')
plt.axvspan((z0x - zR) * 1e3, (z0x + zR) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0x - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0x + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3)
# semi-minor axis plot
fig.add_subplot(gs[0, 1])
residualsy, z0y, zR, used = _fit_plot(z, dy, lambda0, strict=strict, z0=z0, d0=d0)
unused = np.logical_not(used)
plt.title('Semi-minor Axis Diameters')
plt.ylim(0, ymax)
ymax = max(np.max(residualsx), np.max(residualsy)) * 1e6
ymin = min(np.min(residualsx), np.min(residualsy)) * 1e6
ax.set_ylim(ymin, ymax)
# semi-minor residuals
fig.add_subplot(gs[1, 1])
plt.plot(z[used] * 1e3, residualsy[used] * 1e6, 'ok', label='used')
plt.plot(z[unused] * 1e3, residualsy[unused] * 1e6, 'ok', mfc='none', label='unused')
plt.axhline(color="gray", zorder=-1)
plt.xlabel('axial position $z$ (mm)')
plt.ylabel('')
plt.axvspan((z0y - zR) * 1e3, (z0y + zR) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0y - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3)
plt.axvspan((z0y + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3)
plt.ylim(ymin, ymax)
def M2_radius_plot(z, d, lambda0, strict=False, z0=None, d0=None):
"""
Plot radii, beam fits, and asymptotes.
Example::
>>>> import numpy as np
>>>> import laserbeamsize as lbs
>>>> lambda0 = 632.8e-9 # meters
>>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770])
>>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397])
>>>> lbs.M2_radius_plot(z * 1e-3, 2 * r * 1e-6, lambda0)
>>>> plt.show()
Args:
z: array of axial position of beam measurements [m]
d: array of beam diameters [m]
lambda0: wavelength of the laser [m]
Returns:
nothing
"""
params, errors, used = M2_fit(z, d, lambda0, strict=strict, z0=z0, d0=d0)
unused = np.logical_not(used)
d0, z0, Theta, M2, zR = params
d0_std, _, Theta_std, M2_std, _ = errors
plt.figure(1, figsize=(12, 8))
# fitted line
zmin = min(np.min(z - z0), -4 * zR) * 1.05 + z0
zmax = max(np.max(z - z0), +4 * zR) * 1.05 + z0
plt.xlim((zmin - z0) * 1e3, (zmax - z0) * 1e3)
z_fit = np.linspace(zmin, zmax)
d_fit = np.sqrt(d0**2 + (Theta * (z_fit - z0))**2)
# plt.plot((z_fit - z0) * 1e3, d_fit * 1e6 / 2, ':r')
# plt.plot((z_fit - z0) * 1e3, -d_fit * 1e6 / 2, ':r')
d_fit_lo = np.sqrt((d0 - d0_std)**2 + ((Theta - Theta_std) * (z_fit - z0))**2)
d_fit_hi = np.sqrt((d0 + d0_std)**2 + ((Theta + Theta_std) * (z_fit - z0))**2)
# asymptotes
r_left = -(z0 - zmin) * np.tan(Theta / 2) * 1e6
r_right = (zmax - z0) * np.tan(Theta / 2) * 1e6
plt.plot([(zmin - z0) * 1e3, (zmax - z0) * 1e3], [r_left, r_right], '--b')
plt.plot([(zmin - z0) * 1e3, (zmax - z0) * 1e3], [-r_left, -r_right], '--b')
# xticks along top axis
ticks = [(i * zR) * 1e3 for i in range(int((zmin - z0) / zR), int((zmax - z0) / zR) + 1)]
ticklabels1 = ["%.0f" % (z + z0 * 1e3) for z in ticks]
ticklabels2 = []
for i in range(int((zmin - z0) / zR), int((zmax - z0) / zR) + 1):
if i == 0:
ticklabels2 = np.append(ticklabels2, "0")
elif i == -1:
ticklabels2 = np.append(ticklabels2, r"-$z_R$")
elif i == 1:
ticklabels2 = np.append(ticklabels2, r"$z_R$")
else:
ticklabels2 = np.append(ticklabels2, r"%d$z_R$" % i)
ax1 = plt.gca()
ax2 = ax1.twiny()
ax1.set_xticks(ticks)
if len(ticks) > 10:
ax1.set_xticklabels(ticklabels1, fontsize=14, rotation=90)
else:
ax1.set_xticklabels(ticklabels1, fontsize=14)
ax2.set_xbound(ax1.get_xbound())
ax2.set_xticks(ticks)
if len(ticks) > 10:
ax2.set_xticklabels(ticklabels2, fontsize=14, rotation=90)
else:
ax2.set_xticklabels(ticklabels2, fontsize=14)
# usual labels for graph
ax1.set_xlabel('Axial Location (mm)', fontsize=14)
ax1.set_ylabel('Beam radius (µm)', fontsize=14)
title = r'$w_0=d_0/2$=%.0f±%.0fµm, ' % (d0 / 2 * 1e6, d0_std / 2 * 1e6)
title += r'$M^2$ = %.2f±%.2f, ' % (M2, M2_std)
title += r'$\lambda$=%.0f nm' % (lambda0 * 1e9)
plt.title(title, fontsize=16)
# show the divergence angle
s = r'$\Theta$ = %.2f±%.2f mrad' % (Theta * 1e3, Theta_std * 1e3)
plt.text(2 * zR * 1e3, 0, s, ha='left', va='center', fontsize=16)
arc_x = 1.5 * zR * 1e3
arc_y = 1.5 * zR * np.tan(Theta / 2) * 1e6
plt.annotate('', (arc_x, -arc_y), (arc_x, arc_y),
arrowprops=dict(arrowstyle="<->",
connectionstyle="arc3, rad=-0.2"))
# show the Rayleigh ranges
ymin = max(max(d_fit), max(d))
ymin *= -1 / 2 * 1e6
plt.text(0, ymin, '$-z_R<z-z_0<z_R$', ha='center', va='bottom', fontsize=16)
x = (zmax - z0 + 2 * zR) / 2 * 1e3
plt.text(x, ymin, '$2z_R < z-z_0$', ha='center', va='bottom', fontsize=16)
x = (zmin - z0 - 2 * zR) / 2 * 1e3
plt.text(x, ymin, '$z-z_0 < -2z_R$', ha='center', va='bottom', fontsize=16)
ax1.axvspan((-zR) * 1e3, (+zR) * 1e3, color='cyan', alpha=0.3)
ax1.axvspan((-2 * zR) * 1e3, (zmin - z0) * 1e3, color='cyan', alpha=0.3)
ax1.axvspan((+2 * zR) * 1e3, (zmax - z0) * 1e3, color='cyan', alpha=0.3)
# show the fit
zz = (z_fit - z0) * 1e3
lo = d_fit_lo * 1e6 / 2
hi = d_fit_hi * 1e6 / 2
ax1.fill_between(zz, lo, hi, color='red', alpha=0.5)
ax1.fill_between(zz, -lo, -hi, color='red', alpha=0.5)
# show perfect gaussian caustic when unphysical M2 arises
if M2 < 1:
Theta00 = 4 * lambda0 / (np.pi * d0)
r_00 = np.sqrt(d0**2 + (Theta00 * zz * 1e-3)**2) / 2 * 1e6
plt.plot(zz, r_00, ':k', lw=2, label="M²=1")
plt.plot(zz, -r_00, ':k', lw=2)
plt.legend(loc="lower right")
# data points
ax1.plot((z[used] - z0) * 1e3, d[used] * 1e6 / 2, 'ok', label='used')
ax1.plot((z[used] - z0) * 1e3, -d[used] * 1e6 / 2, 'ok')
ax1.plot((z[unused] - z0) * 1e3, d[unused] * 1e6 / 2, 'ok', mfc='none', label='unused')
ax1.plot((z[unused] - z0) * 1e3, -d[unused] * 1e6 / 2, 'ok', mfc='none')
if sum(z[unused]) > 0:
ax1.legend(loc='center left')
def M2_focus_plot(w0, lambda0, f, z0, M2=1):
"""
Plot a beam from its waist through a lens to its focus.
After calling this, use `plt.show()` to display the plot.
The lens is at `z=0` with respect to the beam waist. All distances to
the left of the lens are negative and those to the right are positive.
The beam has a waist at `z0`. If the beam waist is at the front focal
plane of the lens then `z0=-f`.
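    Example::
        >>>> import matplotlib.pyplot as plt
        >>>> import laserbeamsize as lbs
        >>>> # illustrative values: 100 µm waist radius, 50 mm before a 25 mm lens
        >>>> lbs.M2_focus_plot(100e-6, 632.8e-9, 0.025, -0.05, M2=1.3)
        >>>> plt.show()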
Args:
w0: beam radius at waist [m]
lambda0: wavelength of beam [m]
f: focal length of lens [m]
z0: location of beam waist [m]
M2: beam propagation factor [-]
Returns:
nothing.
"""
# plot the beam from just before the waist to the lens
left = 1.1 * z0
z = np.linspace(left, 0)
r = beam_radius(w0, lambda0, z, z0=z0, M2=M2)
plt.fill_between(z * 1e3, -r * 1e6, r * 1e6, color='red', alpha=0.2)
# find the gaussian beam parameters for the beam after the lens
w0_after = w0 * magnification(w0, lambda0, z0, f, M2=M2)
z0_after = image_distance(w0, lambda0, z0, f, M2=M2)
zR_after = z_rayleigh(w0_after, lambda0, M2)
# plot the beam after the lens
right = max(2 * f, z0_after + 4 * zR_after)
z_after = np.linspace(0, right)
r_after = beam_radius(w0_after, lambda0, z_after, z0=z0_after, M2=M2)
# plt.axhline(w0_after * 1.41e6)
plt.fill_between(z_after * 1e3, -r_after * 1e6, r_after * 1e6, color='red', alpha=0.2)
# locate the lens and the two beam waists
plt.axhline(0, color='black', lw=1)
plt.axvline(0, color='black')
plt.axvline(z0 * 1e3, color='black', linestyle=':')
plt.axvline(z0_after * 1e3, color='black', linestyle=':')
# finally, show the ±1 Rayleigh distance
zRmin = max(0, (z0_after - zR_after)) * 1e3
zRmax = (z0_after + zR_after) * 1e3
plt.axvspan(zRmin, zRmax, color='blue', alpha=0.1)
plt.xlabel('Axial Position Relative to Lens (mm)')
plt.ylabel('Beam Radius (microns)')
title = "$w_0$=%.0fµm, $z_0$=%.0fmm, " % (w0 * 1e6, z0 * 1e3)
title += "$w_0'$=%.0fµm, $z_0'$=%.0fmm, " % (w0_after * 1e6, z0_after * 1e3)
title += "$z_R'$=%.0fmm" % (zR_after * 1e3)
plt.title(title)
|
nilq/baby-python
|
python
|
'''
09.60 - Use the 8x8 LED Matrix with the max7219 driver using SPI
This sketch shows how to control the 8x8 LED Matrix to draw random pixels.
Components
----------
- ESP32
- One or more 8x8 LED matrix displays with the max7219 driver
- GND --> GND
- VCC --> 5V
- CS --> GPIO 5 (SPI SS)
- CLK --> GPIO 18 (SPI SCK)
- DIN --> GPIO 23 (SPI MOSI)
- Wires
- Breadboard
Documentation:
* Pins and GPIO: https://micropython-docs-esp32.readthedocs.io/en/esp32_doc/esp32/quickref.html#pins-and-gpio
* sleep: http://docs.micropython.org/en/latest/library/utime.html?highlight=utime%20sleep#utime.sleep
* SPI (hardware): https://docs.micropython.org/en/latest/esp32/quickref.html#hardware-spi-bus
* max7219: https://github.com/mcauser/micropython-max7219
* random function: https://docs.python.org/3/library/random.html
Course:
MicroPython with the ESP32
https://techexplorations.com
'''
import max7219
from machine import Pin, SPI
from utime import sleep_ms
from random import randint
#spi = SPI(2, baudrate=10000000, polarity=1, phase=0, sck=Pin(18), mosi=Pin(23))
spi = SPI(2, 10000000, sck=Pin(18), mosi=Pin(23))
ss = Pin(5, Pin.OUT)
display = max7219.Matrix8x8(spi, ss, 4)
display.fill(0)
display.brightness(5)
while True:
for x in range(10):
        display.pixel(randint(0, 31), randint(0, 7), 1)
display.show()
sleep_ms(15)
display.fill(0)
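# A variation (untested sketch): Matrix8x8 subclasses framebuf.FrameBuffer, so
# text() can scroll a message across the 4 cascaded displays instead:
#
# while True:
#     for offset in range(32, -40, -1):
#         display.fill(0)
#         display.text('ESP32', offset, 0, 1)
#         display.show()
#         sleep_ms(50)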
|
nilq/baby-python
|
python
|
#!/usr/bin/python
from setuptools import setup, find_packages
version = '0.9.4'
setup(name='workerpool',
version=version,
description="Module for distributing jobs to a pool of worker threads.",
long_description="""\
Performing tasks in many threads made fun!
This module facilitates distributing simple operations into jobs that are sent
to worker threads, maintained by a pool object.
It consists of these components:
1. Jobs, which are single units of work that need to be performed.
2. Workers, who grab jobs from a queue and perform them.
3. Worker pool, which keeps track of workers and the job queue.
""",
# Strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
keywords='pooling threading jobs',
author='Andrey Petrov',
author_email='andrey.petrov@shazow.net',
url='https://github.com/shazow/workerpool',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
"six"
],
entry_points="""
# -*- Entry points: -*-
""",
test_suite='nose.collector'
)
|
nilq/baby-python
|
python
|
"""
For more details, see the class documentation.
"""
from django.db.models import Q
from map_annotate_app.dto import CrimeDTO
from map_annotate_app.extras import Location
from map_annotate_app.models import Crime
class CrimeDAO:
"""
This class represents the data access layer for a crime record.
"""
def __init__(self):
pass
@staticmethod
def get_crime_list(crime_filter):
"""
        Gets crimes that pass the given filter from the database.
        C{crime_filter} is a C{CrimeFilter} object used to filter the crimes.
        Returns a list of C{CrimeDTO} objects that satisfy C{crime_filter}.
"""
return_list = []
crime_obj = Crime.objects
if len(crime_filter.type_id_list) > 0:
filter_type_parameter = Q(type_id=crime_filter.type_id_list[0])
for type_id in crime_filter.type_id_list[1:]:
filter_type_parameter = filter_type_parameter | Q(type_id=type_id)
crime_obj = crime_obj.filter(filter_type_parameter)
if crime_filter.north_east and crime_filter.south_west:
# TODO: May cause errors when longitude varies from +180 to -180
crime_obj = crime_obj.filter(location__lat__lte=crime_filter.north_east.lat,
location__lat__gte=crime_filter.south_west.lat,
location__lng__lte=crime_filter.north_east.lng,
location__lng__gte=crime_filter.south_west.lng, )
if crime_filter.dateFrom:
crime_obj = crime_obj.filter(timestamp__gte=crime_filter.dateFrom)
if crime_filter.dateTo:
crime_obj = crime_obj.filter(timestamp__lte=crime_filter.dateTo)
result_set = crime_obj.select_related('location', 'type').all()
for each in result_set:
crime_data_dto = CrimeDTO.CrimeDTO()
crime_data_dto.type = str(each.type.crime_type)
# crime_data_dto.type = "mobile theft"
crime_data_dto.fir_no = "\"" + str(each.fir_number) + "\""
crime_data_dto.location = Location.Location(each.location.lat, each.location.lng)
# crime_data_dto.location = Location.Location(23, 45)
crime_data_dto.timestamp = each.timestamp.strftime("%d %B, %Y, %H:%M")
crime_data_dto.url_link = "http://www.zipnet.in"
return_list.append(crime_data_dto)
# return_list.append(Pin.Pin(crime_data_dto.location, [crime_data_dto], [], []))
return return_list
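# Example usage (sketch; attribute names follow the filter handling above, and
# CrimeFilter is assumed to be importable from the app's filter module):
#   crime_filter = CrimeFilter()
#   crime_filter.type_id_list = [1, 3]
#   crime_filter.north_east = Location.Location(28.7, 77.3)
#   crime_filter.south_west = Location.Location(28.4, 76.9)
#   crimes = CrimeDAO.get_crime_list(crime_filter)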
|
nilq/baby-python
|
python
|
"""
$url mediavitrina.ru
$type live
$region Russia
"""
import logging
import re
from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.utils.url import update_qsd
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(r"""https?://(?:www\.)?(?:
5-tv
|
chetv
|
ctc(?:love)?
|
domashniy
)\.ru/(?:live|online)""", re.VERBOSE))
@pluginmatcher(re.compile(r"https?://ren\.tv/live"))
@pluginmatcher(re.compile(r"https?://player\.mediavitrina\.ru/.+/player\.html"))
class MediaVitrina(Plugin):
_re_url_json = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v2/\w+/playlist/[\w-]+_as_array\.json")
def _get_streams(self):
self.session.http.headers.update({"Referer": self.url})
p_netloc = urlparse(self.url).netloc
if p_netloc == "player.mediavitrina.ru":
# https://player.mediavitrina.ru/
url_player = self.url
elif p_netloc.endswith("ctc.ru"):
# https://ctc.ru/online/
url_player = self.session.http.get(
"https://ctc.ru/api/page/v1/online/",
schema=validate.Schema(
validate.parse_json(),
{"content": validate.all(
[dict],
validate.filter(lambda n: n.get("type") == "on-air"),
[{"onAirLink": validate.url(netloc="player.mediavitrina.ru")}],
validate.get((0, "onAirLink"))
)},
validate.get("content")
)
)
else:
# https://chetv.ru/online/
# https://ctclove.ru/online/
# https://domashniy.ru/online/
# https://ren.tv/live
# https://www.5-tv.ru/online/
url_player = self.session.http.get(self.url, schema=validate.Schema(
validate.parse_html(),
validate.xml_xpath_string(".//iframe[starts-with(@src,'https://player.mediavitrina.ru/')]/@src"),
))
if not url_player:
return
log.debug(f"url_player={url_player}")
script_data = self.session.http.get(url_player, schema=validate.Schema(
validate.parse_html(),
validate.xml_xpath_string(".//script[contains(text(),'media.mediavitrina.ru/')]/text()"),
))
if not script_data:
log.debug("invalid script_data")
return
m = self._re_url_json.search(script_data)
if not m:
log.debug("invalid url_json")
return
url_json = m.group(0)
log.debug(f"url_json={url_json}")
res_token = self.session.http.get(
"https://media.mediavitrina.ru/get_token",
schema=validate.Schema(
validate.parse_json(),
{"result": {"token": str}},
validate.get("result"),
))
url = self.session.http.get(
update_qsd(url_json, qsd=res_token),
schema=validate.Schema(
validate.parse_json(),
{"hls": [validate.url()]},
validate.get(("hls", 0)),
))
if not url:
return
if "georestrictions" in url:
log.error("Stream is geo-restricted")
return
return HLSStream.parse_variant_playlist(self.session, url, name_fmt="{pixels}_{bitrate}")
__plugin__ = MediaVitrina
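# Example (sketch): with this plugin installed, Streamlink resolves the matched
# URLs directly, e.g.
#   streamlink https://ctc.ru/online/ best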
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import sys
from socket import *
from time import strftime
import datetime
def main():
if len(sys.argv) < 4:
print("completion_logger_server.py <listen address> <listen port> <log file>")
        sys.exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print("Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3]))
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write("{ ");
f.write("\"time\": \"{0}\"".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')))
f.write(", \"sender\": \"{0}\" ".format(addr[0]))
f.write(", \"data\": ")
            # data arrives as bytes under Python 3; decode before writing text
            f.write(data.decode("utf-8", "replace") if isinstance(data, bytes) else data)
f.write(" }\n")
f.flush()
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
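# Example invocation (sketch; host, port, and log file are user-chosen):
#   ./completion_logger_server.py 0.0.0.0 6979 completion.log
# then send a test datagram from another shell:
#   echo '{"event": "test"}' | nc -u -w1 127.0.0.1 6979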
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
python 1.5.2 lacks some networking routines. This module implements
them (as I don't want to drop 1.5.2 compatibility atm)
"""
# $Id: net.py,v 1.2 2001/11/19 00:47:49 ivo Exp $
from string import split
import socket, fcntl, FCNTL
def inet_aton(str):
"""
    Convert dotted-quad notation to an int.
    Python 2.x's inet_aton returns a string containing the network
    representation (matching the C inet_aton); this version returns
    the integer instead.
n = 0
quads = split(str, ".")
if len(quads) != 4:
raise socket.error, "illegal IP address string passed to inet_aton"
for i in quads:
try:
j = int(i)
if not(0 <= j <= 255):
raise socket.error, \
"illegal IP address string passed to inet_aton"
except ValueError:
raise socket.error, "illegal IP address string passed to inet_aton"
n = (int(quads[0]) << 24) + (int(quads[1]) << 16) + \
(int(quads[2]) << 8) + int(quads[3])
return n
def inet_ntoa(addr):
"""
Do the reverse of inet_aton, return the quad notation of 'addr'
which is a long containing the network address
"""
quad = [0,0,0,0]
for i in (0,1,2,3):
quad[i] = (addr >> (8*(3-i))) & 0xFF
return "%u.%u.%u.%u" % tuple(quad)
def make_nonblocking(fd):
fl = fcntl.fcntl(fd, FCNTL.F_GETFL)
try:
fcntl.fcntl(fd, FCNTL.F_SETFL, fl | FCNTL.O_NDELAY)
except AttributeError:
fcntl.fcntl(fd, FCNTL.F_SETFL, fl | FCNTL.FNDELAY)
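# Example (sketch): put a freshly created socket into non-blocking mode
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   make_nonblocking(s.fileno())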
if __name__ == '__main__':
print "Testing inet_aton"
for i in ('0.0.0.0', '127.0.0.1', '255.255.255.255', '10.0.0.1'):
print "%s -> %lu" % (i, inet_aton(i))
print "The following wil fail"
for i in ('0.0.0.0.0', '127.0.0', '256.255.255.255', 'www.amaze.nl'):
try:
print "%s -> %lu" % (i, inet_aton(i))
except socket.error:
print "Could not translate %s" % i
print "Testing inet_ntoa"
for i in ('0.0.0.0', '127.0.0.1', '255.255.255.255', '10.0.0.1'):
print "%s -> %s" % (i, inet_ntoa(inet_aton(i)))
|
nilq/baby-python
|
python
|
""" this is for pytest to import everything smoothly """
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
|
nilq/baby-python
|
python
|
from fastapi import FastAPI
from fastapi.responses import JSONResponse
app = FastAPI()
store = {
'demo': 'this is important data!'
}
@app.get('/')
def read_keys():
    """Return all key-value pairs."""
    return store
@app.post('/')
def create_key(key: str, value: str):
    """Create a new key-value pair."""
    store[key] = value
    return {key: store[key]}
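# Example usage (sketch; module name and port are assumptions):
#   uvicorn main:app --reload
#   curl http://localhost:8000/
#   curl -X POST 'http://localhost:8000/?key=greeting&value=hello'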
|
nilq/baby-python
|
python
|
import pytest
import logging
import tempfile
from lindh import jsondb
# Logging
FORMAT = '%(asctime)s [%(threadName)s] %(filename)s +%(lineno)s ' + \
    '%(funcName)s %(levelname)s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
@pytest.fixture(scope='function')
def db():
db = jsondb.Database(root=tempfile.mkdtemp(prefix='jsondb-'))
yield db
db.destroy()
def test_init(db):
assert db is not None
def test_save(db):
o = db.save({'a': 1})
assert '_id' in o.keys()
assert o['_id'] is not None
assert db.has(o['_id'])
def test_get(db):
o = db.save({'a': 1})
new_id = o['_id']
assert new_id is not None
o = db.get(new_id)
assert o is not None
assert o['a'] == 1
assert '_id' in o.keys()
assert o['_id'] == new_id
assert '_rev' in o.keys()
def test_get_2(db):
o1 = db.save({'a': 1})
new_id_1 = o1['_id']
assert new_id_1 is not None
o2 = db.save({'b': 2})
new_id_2 = o2['_id']
assert new_id_2 is not None
o1 = db.get(new_id_1)
assert o1 is not None
assert o1['a'] == 1
assert '_id' in o1.keys()
assert o1['_id'] == new_id_1
assert '_rev' in o1.keys()
o2 = db.get(new_id_2)
assert o2 is not None
assert o2['b'] == 2
assert '_id' in o2.keys()
assert o2['_id'] == new_id_2
assert '_rev' in o2.keys()
def test_delete(db):
o = db.save({'a': 1})
new_id = o['_id']
assert new_id is not None
db.delete(new_id)
assert not db.has(new_id)
def test_update(db):
o = db.save({'a': 1})
new_id = o['_id']
first_rev = o['_rev']
assert first_rev is not None
assert new_id is not None
o['a'] = 2
o = db.save(o)
assert o['a'] == 2
second_rev = o['_rev']
assert second_rev is not None
assert first_rev != second_rev
o = db.get(new_id)
assert o['a'] == 2
assert o['_rev'] == second_rev
def test_view_just_save(db):
db.define('b_by_a', lambda o: (o['a'], o['b']))
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
r = db.view('b_by_a')
r = list(r)
assert len(r) == 3
assert r[0] == {'id': 2, 'key': 1, 'value': 11}
assert r[1] == {'id': 0, 'key': 2, 'value': 22}
assert r[2] == {'id': 1, 'key': 3, 'value': 33}
def test_view_save_and_update_value(db):
db.define('b_by_a', lambda o: (o['a'], o['b']))
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
o1 = db.save({'a': 1, 'b': 11})
o1['b'] = 1111
db.save(o1)
r = db.view('b_by_a')
r = list(r)
assert len(r) == 3
assert r[0] == {'id': 2, 'key': 1, 'value': 1111}
assert r[1] == {'id': 0, 'key': 2, 'value': 22}
assert r[2] == {'id': 1, 'key': 3, 'value': 33}
def test_view_save_and_delete(db):
db.define('b_by_a', lambda o: (o['a'], o['b']))
o2 = db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.delete(o2['_id'])
r = db.view('b_by_a')
r = list(r)
assert len(r) == 2
assert r[0] == {'id': 2, 'key': 1, 'value': 11}
assert r[1] == {'id': 1, 'key': 3, 'value': 33}
def test_view_kickstart(db):
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = db.view('b_by_a')
r = list(r)
assert len(r) == 3
assert r[0] == {'id': 2, 'key': 1, 'value': 11}
assert r[1] == {'id': 0, 'key': 2, 'value': 22}
assert r[2] == {'id': 1, 'key': 3, 'value': 33}
def test_view_by_key(db):
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', key=2))
assert len(r) == 1
assert r[0] == {'id': 0, 'key': 2, 'value': 22}
def test_view_by_key_string(db):
db.save({'a': '2', 'b': 22})
db.save({'a': '3', 'b': 33})
db.save({'a': '1', 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', key='2'))
assert len(r) == 1
assert r[0] == {'id': 0, 'key': '2', 'value': 22}
def test_view_by_key_two_values_same_key_before(db):
db.define('b_by_a', lambda o: (o['a'], o['b']))
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.save({'a': 2, 'b': 44})
r = list(db.view('b_by_a', key=2))
assert len(r) == 2
assert r[0] == {'id': 0, 'key': 2, 'value': 22}
assert r[1] == {'id': 3, 'key': 2, 'value': 44}
def test_view_by_key_two_values_same_key_after(db):
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.save({'a': 2, 'b': 44})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', key=2))
assert len(r) == 2
assert r[0] == {'id': 0, 'key': 2, 'value': 22}
assert r[1] == {'id': 3, 'key': 2, 'value': 44}
def test_view_by_startkey(db):
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', startkey=2))
assert len(r) == 2
assert r[0] == {'id': 0, 'key': 2, 'value': 22}
assert r[1] == {'id': 1, 'key': 3, 'value': 33}
def test_view_by_startkey_after(db):
db.save({'a': 3, 'b': 33})
db.save({'a': 4, 'b': 44})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', startkey=2))
assert len(r) == 2
assert r[0] == {'id': 0, 'key': 3, 'value': 33}
assert r[1] == {'id': 1, 'key': 4, 'value': 44}
def test_view_by_endkey(db):
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', endkey=2))
assert len(r) == 2
assert r[0] == {'id': 2, 'key': 1, 'value': 11}
assert r[1] == {'id': 0, 'key': 2, 'value': 22}
def test_view_by_endkey_after(db):
db.save({'a': 2, 'b': 22})
db.save({'a': 4, 'b': 44})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', lambda o: (o['a'], o['b']))
r = list(db.view('b_by_a', endkey=3))
assert len(r) == 2
assert r[0] == {'id': 2, 'key': 1, 'value': 11}
assert r[1] == {'id': 0, 'key': 2, 'value': 22}
def test_add_with_custom_keys(db):
db['a'] = {'a': 2, 'b': 22}
db[1] = {'a': 3, 'b': 33}
db[('a', 1)] = {'a': 1, 'b': 11}
assert db['a'] == {'_id': 'a', '_rev': 0, 'a': 2, 'b': 22}
assert db[1] == {'_id': 1, '_rev': 0, 'a': 3, 'b': 33}
assert db[('a', 1)] == {'_id': ['a', 1], '_rev': 0, 'a': 1, 'b': 11}
def test_include_docs(db):
db.define('by_id', lambda o: (o['_id'], 1))
db[1] = {1: 11}
db[2] = {2: 12}
db[5] = {5: 15}
db[7] = {7: 17}
r = list(db.view('by_id', include_docs=True))
assert r[0] == {'id': 1, 'key': 1, 'value': 1,
'doc': {'_id': 1, '_rev': 0, '1': 11}}
assert r[1] == {'id': 2, 'key': 2, 'value': 1,
'doc': {'_id': 2, '_rev': 0, '2': 12}}
assert r[2] == {'id': 5, 'key': 5, 'value': 1,
'doc': {'_id': 5, '_rev': 0, '5': 15}}
assert r[3] == {'id': 7, 'key': 7, 'value': 1,
'doc': {'_id': 7, '_rev': 0, '7': 17}}
def test_yielding_mapping_function(db):
def yielder(o):
yield (o['a'], 1), o['b']
yield (o['a'], 2), o['b'] * 2
yield (o['a'], 3), o['b'] * 3
db.save({'a': 2, 'b': 22})
db.save({'a': 3, 'b': 33})
db.save({'a': 1, 'b': 11})
db.define('b_by_a', yielder)
r = db.view('b_by_a')
r = list(r)
assert len(r) == 9
assert r[0] == {'id': 2, 'key': (1, 1), 'value': 11}
assert r[1] == {'id': 2, 'key': (1, 2), 'value': 22}
assert r[2] == {'id': 2, 'key': (1, 3), 'value': 33}
assert r[3] == {'id': 0, 'key': (2, 1), 'value': 22}
assert r[4] == {'id': 0, 'key': (2, 2), 'value': 44}
assert r[5] == {'id': 0, 'key': (2, 3), 'value': 66}
assert r[6] == {'id': 1, 'key': (3, 1), 'value': 33}
assert r[7] == {'id': 1, 'key': (3, 2), 'value': 66}
assert r[8] == {'id': 1, 'key': (3, 3), 'value': 99}
def test_reduce_by_group(db):
def sum_per(field, values):
result = {}
for value in values:
v = value.get(field)
if v in result:
result[v] += 1
else:
result[v] = 1
return result
db.define('test',
lambda o: (o['category'], {'state': o['state']}),
lambda keys, values, rereduce: sum_per('state', values))
db.save({'category': 'a', 'state': 'new'})
db.save({'category': 'b', 'state': 'new'})
db.save({'category': 'a', 'state': 'old'})
db.save({'category': 'b', 'state': 'new'})
db.save({'category': 'a', 'state': 'old'})
db.save({'category': 'a', 'state': 'new'})
db.save({'category': 'c', 'state': 'new'})
db.save({'category': 'c', 'state': 'old'})
db.save({'category': 'a', 'state': 'new'})
db.save({'category': 'a', 'state': 'new'})
r = list(db.view('test', group=True))
print(r)
assert r[0] == {'key': 'a', 'value': {'new': 4, 'old': 2}}
assert r[1] == {'key': 'b', 'value': {'new': 2}}
assert r[2] == {'key': 'c', 'value': {'new': 1, 'old': 1}}
def test_skip(db):
db.define('by_id', lambda o: (o['_id'], 1))
db[1] = {1: 11}
db[2] = {2: 12}
db[5] = {5: 15}
db[7] = {7: 17}
r = list(db.view('by_id', include_docs=True, skip=2))
assert r[0] == {'id': 5, 'key': 5, 'value': 1,
'doc': {'_id': 5, '_rev': 0, '5': 15}}
assert r[1] == {'id': 7, 'key': 7, 'value': 1,
'doc': {'_id': 7, '_rev': 0, '7': 17}}
def test_limit(db):
db.define('by_id', lambda o: (o['_id'], 1))
db[1] = {1: 11}
db[2] = {2: 12}
db[5] = {5: 15}
db[7] = {7: 17}
r = list(db.view('by_id', include_docs=True, limit=2))
assert r[0] == {'id': 1, 'key': 1, 'value': 1,
'doc': {'_id': 1, '_rev': 0, '1': 11}}
assert r[1] == {'id': 2, 'key': 2, 'value': 1,
'doc': {'_id': 2, '_rev': 0, '2': 12}}
def test_skip_and_limit(db):
db.define('by_id', lambda o: (o['_id'], 1))
db[1] = {1: 11}
db[2] = {2: 12}
db[5] = {5: 15}
db[7] = {7: 17}
r = list(db.view('by_id', include_docs=True, skip=1, limit=2))
assert r[0] == {'id': 2, 'key': 2, 'value': 1,
'doc': {'_id': 2, '_rev': 0, '2': 12}}
assert r[1] == {'id': 5, 'key': 5, 'value': 1,
'doc': {'_id': 5, '_rev': 0, '5': 15}}
|
nilq/baby-python
|
python
|
import sys
import os
from PIL import Image, ImageDraw
# Add scripts dir to python search path
sys.path.append(os.path.dirname(os.path.abspath(sys.argv[0])))
from maps_def import maps as MAPS
BORDERS = True
IDS = True
def bake_map(tiles, info):
size = tiles[0].size[0]
res = Image.new("RGB", (len(info[0]) * size, len(info) * size))
z_d = ImageDraw.Draw(res)
for y, line in enumerate(info):
for x, tile in enumerate(line):
res.paste(tiles[tile[0]].rotate(-90 * tile[1]),
(x * size, (len(info) - 1) * size - y * size))
# naming
if IDS:
z_d.text((x * size + 10,
(len(info) - 1) * size - y * size + 2),
str(tile[0]),
fill=(255, 0, 0))
# Tiles borders
if BORDERS:
for i in range(len(info)):
z_d.line((0, i * size, len(info[0]) * size, i * size), fill=(0, 0, 100))
# vertical
for i in range(len(info[0])):
z_d.line((i * size, 0, i * size, len(info) * size), fill=(0, 0, 100))
return res
def read_info(map_name):
atls_cnt, y, x = MAPS[map_name.lower()][0]
tmp = MAPS[map_name.lower()][1:]
res = [tmp[i*x:(i+1)*x] for i in range(y)]
return atls_cnt, res
def read_tiles(tiles_path, map_name, tilesets_count):
res = []
for i in range(tilesets_count):
if not os.path.isfile(os.path.join(tiles_path, "{:}{:03d}.png".format(map_name, i))):
print("No such file:", os.path.join(tiles_path, "{:}{:03d}.png".format(map_name, i)))
sys.exit(-2)
atlas = Image.open(os.path.join(tiles_path, "{:}{:03d}.png".format(map_name, i))).transpose(Image.FLIP_TOP_BOTTOM)
t_size = atlas.size[0] // 8
frame = t_size // 8
        useful = t_size * 3 // 4
        for y in range(8):
            for x in range(8):
                res.append(atlas.crop((x * t_size + frame,
                                       y * t_size + frame,
                                       x * t_size + frame + useful,
                                       y * t_size + frame + useful)).transpose(Image.FLIP_TOP_BOTTOM))
return res
if __name__ == "__main__":
if os.environ.get("DONT_CHANGE_CWD", "0").lower() not in ("1", "yes", "true", "on"):
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
if len(sys.argv) != 3:
print("Usage: check_map map_name tiles_dir")
sys.exit(0)
map_name = sys.argv[1]
tiles_path = sys.argv[2]
if map_name.lower() not in MAPS.keys() or \
map_name not in ["BaseGipat", "bz2g", "bz3g", "bz4g", "bz5g", "bz6g", "Zone1", "Zone2", "Zone3Obr", "Zone4", "Zone6", "Zone6_2", "Zone7", "Zone8", "zone9", "ZoneMainMenuNew", "bz10k", "bz8k", "bz9k", "Zone11", "Zone12", "Zone13", "bz11k", "Zone14", "bz13h", "bz16h", "Zone15", "Zone18", "Zone19", "bz14h", "bz15h", "bz18h", "Bz7g", "Zone16", "Zone17", "Zone20", "Zone5_1", "Zone10"]:
print("Unknown map:", map_name)
sys.exit(-1)
tilesets_count, info = read_info(map_name)
tiles = read_tiles(tiles_path, map_name, tilesets_count)
res = bake_map(tiles, info)
res.save("map_checker.png")
|
nilq/baby-python
|
python
|
#@+leo-ver=5-thin
#@+node:edream.110203113231.741: * @file ../plugins/add_directives.py
"""Allows users to define new @direcives."""
from leo.core import leoGlobals as g
directives = ("markup",) # A tuple with one string.
#@+others
#@+node:ekr.20070725103420: ** init
def init():
"""Return True if the plugin has loaded successfully."""
g.registerHandler("start1", addPluginDirectives)
return True
#@+node:edream.110203113231.742: ** addPluginDirectives
def addPluginDirectives(tag, keywords):
"""Add all new directives to g.globalDirectiveList"""
global directives
for s in directives:
if s.startswith('@'):
s = s[1:]
if s not in g.globalDirectiveList:
g.globalDirectiveList.append(s)
#@-others
#@@language python
#@@tabwidth -4
#@-leo
|
nilq/baby-python
|
python
|
# Microsoft API results index & search features generator
"""
Copyright 2016 Fabric S.P.A, Emmanuel Benazera, Alexandre Girard
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os, sys
import json
import numpy as np
import shelve
import hashlib
from feature_generator import FeatureGenerator
from index_search import Indexer, Searcher
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MAPIGenerator(FeatureGenerator):
def __init__(self,image_files,json_files,json_emo_files,index_repo,name,description,tate=False,meta_in='',meta_out='',captions_in='',captions_out=''):
self.name = name
self.description = description
self.tate = tate
self.meta_in = meta_in
self.meta_out = meta_out
self.captions_in = captions_in
self.captions_out = captions_out
self.image_files = image_files
self.json_files = json_files
self.json_emo_files = json_emo_files
self.index_repo = index_repo + '/' + self.name
try:
os.mkdir(self.index_repo)
except:
pass
if self.captions_in == '':
self.captions_in = self.index_repo + '/in_captions.bin'
if self.captions_out == '':
self.captions_out = self.index_repo + '/out_captions.bin'
self.mapi_dominant_colors = {}
self.mapi_tags = {}
self.mapi_categories = {}
self.mapi_people = {}
self.mapi_faces = {} # face + gender + age + emotion
self.mapi_captions = {}
self.stm = {}
self.st = shelve.open(self.index_repo + '/all_tags.bin')
self.scm = {}
self.sc = shelve.open(self.index_repo + '/all_cats.bin')
self.emotions={'anger':0,'contempt':1,'disgust':2,'fear':3,'happiness':4,'neutral':5,'sadness':6,'surprise':7}
return
def __del__(self):
for i,t in self.stm.iteritems():
self.st[i] = t
self.st.close()
        for i,c in self.scm.iteritems():
            self.sc[i] = c
self.sc.close()
    # fuzzy matching of rectangles, since the M$ APIs do not return exactly the same face rectangles from the Vision and Emotion endpoints...
def equal_box(self,box1,box2):
rtol = 0.05
if np.isclose(box1['height'],box2['height'],rtol=rtol) and np.isclose(box1['left'],box2['left'],rtol=rtol) and np.isclose(box1['top'],box2['top'],rtol=rtol) and np.isclose(box1['width'],box2['width'],rtol=rtol):
return True
else:
return False
def has_box(self,newbox,boxes):
n = 0
for b in boxes:
if self.equal_box(newbox['faceRectangle'],b['faceRectangle']):
return n
n = n + 1
return -1
def face_vector(self,fv):
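        # layout produced below: vec[0]=age, vec[1]=gender (1=male, 2=female,
        # -1=unknown), vec[2:10]=emotion scores in self.emotions order
        # (anger..surprise)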
vec = [0.0] * 10
vec[0] = fv.get('age',-1)
gender = -1
g = fv.get('gender',None)
if g:
if g == 'Male':
gender = 1
else:
gender = 2
vec[1] = gender
v_emos = fv.get('emotions',None)
if v_emos:
for e,pos in self.emotions.iteritems():
if v_emos.get(e,None):
vec[2+pos] = v_emos[e]
return vec
def box_hash(self,box):
m = hashlib.md5()
for c,v in box.iteritems():
m.update(str(v))
ha = m.hexdigest()
return ha
def preproc(self):
## prepare fields to be indexed:
# - dominantColors
# - tags (no scores) -> too generic... take top 5 and attach uniform scores
# - categories + scores -> keep scores > 0.3
# - faces + age + gender + emotion (from emotion JSON / API) -> encode age + gender + emotion (8 categories) into vector
if self.tate:
ext = '.jpg'
else:
ext = ''
img_bn = ''
for jf in self.json_files:
with open(jf,'r') as jfile:
json_data = json.load(jfile)
if not json_data:
continue
if not img_bn:
jf = jf.replace('//','/')
img_bn = os.path.dirname(os.path.dirname(jf))
img_name = img_bn + '/' + os.path.basename(jf).replace('_mapi.json',ext)
if not img_name in self.image_files:
continue
if json_data.get('color',None):
self.mapi_dominant_colors[img_name] = []
for c in json_data['color']['dominantColors']:
self.mapi_dominant_colors[img_name].append({'cat':c,'prob':0.1})
if json_data.get('description',None):
self.mapi_tags[img_name] = []
for t in json_data['description']['tags'][:5]:
self.mapi_tags[img_name].append({'cat':t.replace('_',' '),'prob':0.2})
if json_data.get('categories',None):
jd_cats = json_data['categories']
                    self.mapi_categories[img_name] = []
                    for c in jd_cats:
if c['score'] >= 0.3:
self.mapi_categories[img_name].append({'cat':c['name'].replace('_',' '),'prob':c['score']})
if json_data.get('faces',None):
npeople = 0
nmales = 0
nfemales = 0
self.mapi_faces[img_name] = []
jd_faces = json_data['faces']
for jf in jd_faces:
self.mapi_faces[img_name].append(jf)
npeople += 1
gender = jf.get('gender',None)
if gender == 'Male':
nmales += 1
else:
nfemales += 1
self.mapi_people[img_name] = [npeople,nmales,nfemales]
#print self.mapi_people[img_name]
if json_data.get('description',None):
caption = json_data['description'].get('captions',None)
if caption:
caption = caption[0]['text']
self.mapi_captions[img_name] = caption
for jf in self.json_emo_files:
with open(jf,'r') as jfile:
json_data = json.load(jfile)
img_name = img_bn + '/' + os.path.basename(jf).replace('_mapi.json','.jpg')
if not img_name in self.image_files:
continue
if len(json_data) == 0:
continue
if self.mapi_faces.get(img_name,None) == None:
#print 'face detected with emotion API but not with Vision API...'
self.mapi_faces[img_name] = json_data
continue
npeople = 0
emosum = [0.0]*len(self.emotions)
for r in json_data:
n = self.has_box(r,self.mapi_faces[img_name])
if n == -1:
continue
emo_scores = r['scores']
has_emo = False
for e,c in self.emotions.iteritems():
emosum[c] += emo_scores[e]
if emo_scores[e] > 0.5:
if not has_emo:
self.mapi_faces[img_name][n]['emotions'] = {}
has_emo = True
self.mapi_faces[img_name][n]['emotions'][e] = emo_scores[e]
npeople = npeople + 1
if img_name in self.mapi_people:
self.mapi_people[img_name] = self.mapi_people[img_name] + emosum
else:
self.mapi_people[img_name] = [npeople,0.0,0.0] + emosum
return
def index(self):
## index every variable type
# - dominant colors (XXX: let's not match based on this, DNN does much better)
#with Indexer(dim=1,repository=self.index_repo,db_name='colors.bin') as indexer:
# for c,v in self.mapi_dominant_colors.iteritems():
# indexer.index_tags_single(v,c)
# - tags
#print 'indexing mapi tags...'
if self.tate:
with Indexer(dim=1,repository=self.index_repo,db_name='tags.bin') as indexer:
for t,v in self.mapi_tags.iteritems():
indexer.index_tags_single(v,t)
self.stm[t] = []
for tc in v:
self.stm[t].append(tc['cat'])
# - categories
#print 'indexing mapi categories...'
if self.tate:
with Indexer(dim=1,repository=self.index_repo,db_name='cats.bin') as indexer:
for t,v in self.mapi_categories.iteritems():
indexer.index_tags_single(v,t)
self.scm[t] = []
for tc in v:
self.scm[t].append(tc['cat'])
# - number of people and gender
# as a vector [npeople, males, females]
if self.tate:
with Indexer(dim=11,repository=self.index_repo,index_name='people.ann',db_name='people.bin') as indexer:
c = 0
#print 'indexing', len(self.mapi_people),'people'
for t,v in self.mapi_people.iteritems():
if len(v) < 11:
v = v + [0.0]*len(self.emotions) # if no emotion detected
indexer.index_single(c,v,t)
c = c + 1
indexer.build_index()
indexer.save_index()
# - vector for age + gender + emotion + save boxes
#print 'indexing mapi age, gender, emotion and boxes...'
if self.tate:
            c = 0  # ldata record ids for this indexer start from zero
with Indexer(dim=10,repository=self.index_repo) as indexer:
ldb = shelve.open(self.index_repo + '/ldata.bin')
for f,v in self.mapi_faces.iteritems():
if len(v) > 0:
rec = {'faceRectangles':[],'emotions':[],'genders':[],'ages':[]}
for fv in v:
vec = self.face_vector(fv)
indexer.index_single(c,vec,f)
ldb[str(c)] = (fv,f)
c = c + 1
if 'age' in fv:
rec['ages'].append(fv['age'])
if 'emotion' in fv:
rec['emotions'].append(fv['emotions'])
if 'gender' in fv:
rec['genders'].append(fv['gender'])
if 'faceRectangle' in fv:
rec['faceRectangles'].append(fv['faceRectangle'])
ldb[f] = rec
ldb.close()
indexer.build_index()
indexer.save_index()
else:
ldb = shelve.open(self.index_repo + '/out_ldata.bin')
for f,v in self.mapi_faces.iteritems():
rec = {'faceRectangles':[],'emotions':[],'genders':[],'ages':[]}
for fv in v:
if 'age' in fv:
rec['ages'].append(fv['age'])
if 'emotions' in fv:
rec['emotions'].append(fv['emotions'])
if 'gender' in fv:
rec['genders'].append(fv['gender'])
if 'faceRectangle' in fv:
rec['faceRectangles'].append(fv['faceRectangle'])
#print 'indexing=',f,fv
ldb[f] = rec
ldb.close()
# save captions
dbname = '/out_captions.bin'
if self.tate:
dbname = '/in_captions.bin'
ldb = shelve.open(self.index_repo + dbname)
for i,c in self.mapi_captions.iteritems():
ldb[os.path.basename(str(i))] = c.encode('utf8')
#print 'indexing',os.path.basename(str(i)),' / ',c.encode('utf8')
ldb.close()
return
def search(self,jdataout={}):
results_tags = {}
with Searcher(self.index_repo,search_size=1000,db_name='tags.bin') as searcher:
searcher.load_index()
for t,v in self.mapi_tags.iteritems():
nns =searcher.search_tags_single(v,t)
nns['tags_out_all'] = []
for nn in nns['nns_uris']:
nns['tags_out_all'].append(self.st.get(str(nn),''))
results_tags[t] = nns
results_tags = self.to_json(results_tags,'/img/reuters/','/img/tate/',self.name+'_tags',self.description,jdataout,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin')
#print 'results_tags=',results_tags
results_cats = {}
with Searcher(self.index_repo,search_size=1000,db_name='cats.bin') as searcher:
searcher.load_index()
for t,v in self.mapi_categories.iteritems():
nns =searcher.search_tags_single(v,t)
nns['tags_out_all'] = []
for nn in nns['nns_uris']:
nns['tags_out_all'].append(self.sc.get(str(nn),''))
results_cats[t] = nns
results_tmp = self.to_json(results_cats,'/img/reuters/','/img/tate/',self.name+'_cats',self.description,results_tags,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin')
if not results_tmp:
results_tmp = results_tags
#print 'results_tmp=',results_tmp
results_cats = results_tmp
#results_people = {}
#with Searcher(self.index_repo,search_size=200,index_name='people.ann',db_name='people.bin') as searcher:
# searcher.load_index()
# for f,v in self.mapi_people.iteritems():
# if len(v) < 11:
# v = v + [0.0]*8
# nns = searcher.search_single(v,f)
#print 'nns=',nns
# results_people[f] = nns
#print 'results_people=',results_people
#results_tmp = self.to_json(results_people,'/img/reuters','/img/tate/',self.name+'_people',self.description,results_cats,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin')
#if not results_people:
results_tmp = results_cats
results_faces = {}
with Searcher(self.index_repo,search_size=5000) as searcher:
searcher.load_index()
ldb = shelve.open(self.index_repo + '/ldata.bin')
for f,v in self.mapi_faces.iteritems():
resi = {} # results for this image
for fv in v:
vec = self.face_vector(fv)
nns = searcher.search_single(vec,f)
m = 0
in_face_hash = ''
faceR = fv.get('faceRectangle',{})
if faceR:
in_face_hash = self.box_hash(faceR)
age_in = fv.get('age',-1)
#print 'nns scores=',nns['nns'][1]
for nuri in nns['nns_uris']:
nn = nns['nns'][0][m]
nndata = ldb[str(nn)]
nndata0 = nndata[0]
nndata = ldb[nuri]
age_out = nndata0.get('age',-1)
if age_in > 0 and age_out > 0 and not age_in-10<=age_out<=age_in+10:
# print 'discarding based on age, age_in=',age_in,' / age_out=',age_out
continue
if not nuri in resi:
resi[nuri] = {'mapi_out':{'faceRectangles':[],'emotions':[],'genders':[],'ages':[],'boxids':[]},
'mapi_in':{'faceRectangles':[],'emotions':[],'genders':[],'ages':[],'boxids':[]},
'score':0.0}
if in_face_hash:
if not faceR in resi[nuri]['mapi_in']['faceRectangles']:
resi[nuri]['mapi_in']['faceRectangles'].append(faceR)
resi[nuri]['mapi_in']['emotions'].append(fv.get('emotions',{}))
resi[nuri]['mapi_in']['genders'].append(fv.get('gender',-1))
resi[nuri]['mapi_in']['ages'].append(age_in)
resi[nuri]['mapi_in']['boxids'].append([in_face_hash])
else:
bidx = resi[nuri]['mapi_in']['faceRectangles'].index(faceR)
resi[nuri]['mapi_in']['boxids'][bidx].append(in_face_hash)
nnfaceR = nndata0.get('faceRectangle',{})
if nnfaceR:
if not nnfaceR in resi[nuri]['mapi_out']['faceRectangles']:
resi[nuri]['mapi_out']['faceRectangles'].append(nnfaceR)
resi[nuri]['mapi_out']['emotions'].append(nndata0.get('emotions',{}))
resi[nuri]['mapi_out']['genders'].append(nndata0.get('gender',-1))
resi[nuri]['mapi_out']['ages'].append(age_out)
if in_face_hash:
resi[nuri]['mapi_out']['boxids'].append([in_face_hash])
resi[nuri]['score'] += 10.0*nns['nns'][1][m] + 0.5
elif in_face_hash:
bidx = resi[nuri]['mapi_out']['faceRectangles'].index(nnfaceR)
resi[nuri]['mapi_out']['boxids'][bidx].append(in_face_hash)
m = m + 1
# add uri array
nnns_uris = []
nnns = [[],[]]
for r in resi:
if r == 'nns_uris' or r == 'nns':
continue
nnns_uris.append(r)
nnns[0].append('') # dummy array
nnns[1].append(resi[r]['score'])
del resi[r]['score']
resi['nns_uris'] = nnns_uris
resi['nns'] = nnns
results_faces[f] = resi
ldb.close()
results_faces = self.to_json(results_faces,'/img/reuters/','/img/tate/',self.name,self.description,results_tmp,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin')
if not results_faces:
results_faces = results_tmp
#print 'results_faces=',results_faces
return results_faces
|
nilq/baby-python
|
python
|
"""
This module is used to interface with classical HPC queuing systems.
"""
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import SiteToCheck
@admin.register(SiteToCheck)
class SiteToCheckAdmin(admin.ModelAdmin):
list_display = ['url', 'last_status', 'last_response_time']
|
nilq/baby-python
|
python
|
# Based on https://github.com/petkaantonov/bluebird/blob/master/src/promise.js
from .compat import Queue
# https://docs.python.org/2/library/queue.html#Queue.Queue
LATE_QUEUE_CAPACITY = 0 # The queue size is infinite
NORMAL_QUEUE_CAPACITY = 0 # The queue size is infinite
class Async(object):
def __init__(self, schedule):
self.is_tick_used = False
self.late_queue = Queue(LATE_QUEUE_CAPACITY)
self.normal_queue = Queue(NORMAL_QUEUE_CAPACITY)
self.have_drained_queues = False
self.trampoline_enabled = True
self.schedule = schedule
def enable_trampoline(self):
self.trampoline_enabled = True
def disable_trampoline(self):
self.trampoline_enabled = False
def have_items_queued(self):
return self.is_tick_used or self.have_drained_queues
def _async_invoke_later(self, fn, context):
self.late_queue.put(fn)
self.queue_tick(context)
def _async_invoke(self, fn, context):
self.normal_queue.put(fn)
self.queue_tick(context)
def _async_settle_promise(self, promise):
self.normal_queue.put(promise)
self.queue_tick(context=promise._trace)
def invoke_later(self, fn, context):
if self.trampoline_enabled:
self._async_invoke_later(fn, context)
else:
self.schedule.call_later(0.1, fn)
def invoke(self, fn, context):
if self.trampoline_enabled:
self._async_invoke(fn, context)
else:
self.schedule.call(
fn
)
def settle_promises(self, promise):
if self.trampoline_enabled:
self._async_settle_promise(promise)
else:
self.schedule.call(
promise._settle_promises
)
def throw_later(self, reason):
def fn():
raise reason
self.schedule.call(fn)
fatal_error = throw_later
def drain_queue(self, queue):
from .promise import Promise
while not queue.empty():
fn = queue.get()
            if isinstance(fn, Promise):
fn._settle_promises()
continue
fn()
def drain_queues(self):
assert self.is_tick_used
self.drain_queue(self.normal_queue)
self.reset()
self.have_drained_queues = True
self.drain_queue(self.late_queue)
def queue_context_tick(self):
if not self.is_tick_used:
self.is_tick_used = True
self.schedule.call(self.drain_queues)
def queue_tick(self, context):
if not context:
self.queue_context_tick()
else:
(context._parent or context).on_exit(self.queue_context_tick)
def reset(self):
self.is_tick_used = False
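# Minimal usage sketch (assumes a scheduler object exposing call() and
# call_later(), the only methods used above):
#   async_queue = Async(schedule=my_scheduler)
#   async_queue.invoke(lambda: print("runs on the next tick"), context=None)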
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import os
import time
#
# Generate the master out.grid
# Create a 3M point file of lat/lons - and write to ASCII file called out.grd.
# This file will be used as input to ucvm_query for medium scale test for images
#
if not os.path.exists("out.grd"):
print("Creating grd.out file.")
cmd="/app/ucvm/utilities/makegrid"
print(cmd)
os.system(cmd)
#
#
valid_model_strings = {"bbp1d":1,
"cca":1,
"wfcvm":1,
"albacore":1,
"cvlsu":1,
"ivlsu":1,
"cvms":1,
"cvmh":1,
"cvmsi":1,
"cvms5":1}
# Check model parameter
if len(sys.argv) < 2:
print("Input format: % make_mesh_model.py cvms")
sys.exit()
else:
model_string = sys.argv[1]
#
# Check if model is valid
print("Model string: {}".format(model_string))
try:
valid = valid_model_strings[model_string.strip()]
except KeyError:
print("Unknown model: {}".format(model_string))
    for key, value in valid_model_strings.items():
        print(key, value)
sys.exit()
#
# Call each of the installed crustal models and time how
# long it takes to populate the models
#
#
#
start = time.time()
cmd="ucvm_query -f /app/ucvm/conf/ucvm.conf -m %s < out.grd > mesh_%s.out"%(model_string,model_string)
print(cmd)
os.system(cmd)
end = time.time()
print("Mesh extraction for model {} : {} seconds".format(model_string,(end-start)))
|
nilq/baby-python
|
python
|
import trcdproc.navigate.raw as nav
from trcdproc.core import H5File
def test_all_signal_dataset_paths_are_found(organized_faulty_data: H5File):
"""Ensures that all dataset paths are found
"""
dataset_paths_found = {path for path in nav.all_signal_dataset_paths(organized_faulty_data)}
all_paths = []
organized_faulty_data.visit(lambda path: all_paths.append(path))
dataset_paths_present = {'/' + p for p in all_paths
if any(sig in p for sig in ['perp', 'par', 'ref'])
and 'faulty' not in p}
assert dataset_paths_found == dataset_paths_present
def test_all_pump_group_paths_are_found(organized_faulty_data: H5File):
"""Ensures that all of the pump/nopump groups are found, and that no faulty groups are picked up
"""
pump_groups_found = {path for path in nav.pump_group_paths(organized_faulty_data)}
all_paths = []
organized_faulty_data.visit(lambda path: all_paths.append(path))
pump_groups_present = {'/' + p for p in all_paths if p.endswith('pump')}
assert pump_groups_found == pump_groups_present
def test_all_wavelength_groups_under_rounds_are_found(organized_faulty_data: H5File):
"""Ensures that all of the wavelength groups that are subgroups of rounds are found
"""
wavelength_groups_found = {path for path in
nav.wavelengths_under_rounds_paths(organized_faulty_data)}
all_paths = []
organized_faulty_data.visit(lambda path: all_paths.append(path))
wavelength_groups_present = {'/' + p for p in all_paths
if p.endswith('76487')
or p.endswith('76715')
or p.endswith('76940')}
assert wavelength_groups_found == wavelength_groups_present
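# Hedged sketch of the fixture layout these tests assume (paths are
# hypothetical, inferred from the assertions above): signal datasets end in
# 'perp', 'par' or 'ref'; pump groups end in 'pump'; wavelength groups under
# rounds end in '76487', '76715' or '76940'; anything containing 'faulty'
# must be skipped by the navigators.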
|
nilq/baby-python
|
python
|
from nhlscrapi.games.events import EventType as ET, EventFactory as EF
from nhlscrapi.scrapr import descparser as dp
def __shot_type(**kwargs):
skater_ct = kwargs.get('skater_ct', 12)
period = kwargs.get('period', 1)
if period < 5:
return ET.Shot
# elif period < 5:
# return ET.PenaltyShot
else:
return ET.ShootOutAtt
def __goal_type(**kwargs):
skater_ct = kwargs.get('skater_ct', 12)
period = kwargs.get('period', 1)
gt = kwargs['game_type']
if skater_ct <= 7 and period > 4 and gt < 3:
return ET.ShootOutGoal
else:
return ET.Goal
def event_type_mapper(event_str, **kwargs):
event_type_map = {
"SHOT": __shot_type,
"SHOT (!)": __shot_type,
"SHOT (*)": __shot_type,
"BLOCK": lambda **kwargs: ET.Block,
"BLOCKED SHOT": lambda **kwargs: ET.Block,
"MISS": lambda **kwargs: ET.Miss,
"MISSED SHOT": lambda **kwargs: ET.Miss,
"GOAL": __goal_type,
"HIT": lambda **kwargs: ET.Hit,
"HIT (!)": lambda **kwargs: ET.Hit,
"HIT (*)": lambda **kwargs: ET.Hit,
"FAC": lambda **kwargs: ET.FaceOff,
"FACE-OFF": lambda **kwargs: ET.FaceOff,
"GIVE": lambda **kwargs: ET.Giveaway,
"GIVEAWAY": lambda **kwargs: ET.Giveaway,
"TAKE": lambda **kwargs: ET.Takeaway,
"TAKEAWAY": lambda **kwargs: ET.Takeaway,
"PENL": lambda **kwargs: ET.Penalty,
"PENALTY": lambda **kwargs: ET.Penalty,
"STOP": lambda **kwargs: ET.Stoppage,
"STOPPAGE": lambda **kwargs: ET.Stoppage,
"PEND": lambda **kwargs: ET.PeriodEnd,
"GEND": lambda **kwargs: ET.GameEnd,
"SOC": lambda **kwargs: ET.ShootOutEnd
}
e_type = event_type_map[event_str](**kwargs) if event_str in event_type_map else ET.Event
return EF.Create(e_type)
def parse_event_desc(event, season = 2008):
if event.event_type == ET.Shot and season >= 2008:
dp.parse_shot_desc_08(event)
# elif event.event_type == ET.PenaltyShot:
# dp.parse_penalty_shot_desc_08(event)
elif event.event_type == ET.Goal and season >= 2008:
dp.parse_goal_desc_08(event)
elif event.event_type == ET.Miss and season >= 2008:
dp.parse_miss_08(event)
elif event.event_type == ET.FaceOff and season >= 2008:
dp.parse_faceoff_08(event)
elif event.event_type == ET.Hit and season >= 2008:
dp.parse_hit_08(event)
elif event.event_type == ET.Block and season >= 2008:
dp.parse_block_08(event)
elif event.event_type == ET.Takeaway and season >= 2008:
dp.parse_takeaway_08(event)
elif event.event_type == ET.Giveaway and season >= 2008:
dp.parse_giveaway_08(event)
elif event.event_type == ET.ShootOutGoal:
dp.parse_shootout(event)
else:
dp.default_desc_parser(event)
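# Minimal usage sketch (assumes nhlscrapi is installed; the event classes come
# from EventFactory.Create as used above). A goal with <= 7 skaters in period
# 5 of a non-playoff game maps to a shoot-out goal:
#
#   ev = event_type_mapper("GOAL", skater_ct=6, period=5, game_type=2)
#   print(type(ev).__name__)  # expected: ShootOutGoal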
|
nilq/baby-python
|
python
|
from django import VERSION
if VERSION < (3, 2):
default_app_config = (
"rest_framework_simplejwt.token_blacklist.apps.TokenBlacklistConfig"
)
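# Note: ``default_app_config`` was deprecated in Django 3.2, when app configs
# became auto-discovered, hence the version guard above.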
|
nilq/baby-python
|
python
|
# exercise/views.py
# Jake Malley
# 01/02/15
"""
Define all of the routes for the exercise blueprint.
"""
# Imports
from flask import flash, redirect, render_template, \
request, url_for, Blueprint, abort
from flask_login import login_required, current_user
from .forms import AddRunningForm, AddCyclingForm, AddSwimmingForm, CompareMemberForm, EditExerciseForm
from traininglog.models import Member, Exercise, Weight, Message, RunningLookUp, CyclingLookUp, SwimmingLookUp
from traininglog import db
from datetime import datetime, date, timedelta
from .querying_functions import *
from operator import itemgetter
from traininglog.weight.views import weight_required
# Setup the exercise blueprint.
exercise_blueprint = Blueprint(
'exercise', __name__,
template_folder='templates'
)
# Define the routes
@exercise_blueprint.route('/')
@login_required
def index():
"""
Homepage for all the exercise data.
Displays forms for adding exercise, options for generating reports
then a table for all the exercise.
"""
# Create all of the forms.
add_running_form = AddRunningForm()
add_cycling_form = AddCyclingForm()
add_swimming_form = AddSwimmingForm()
# Get all the exercise data.
exercise_data = Exercise.query.filter_by(member=current_user).order_by(Exercise.id.desc()).limit(10).all()
# Get all the current members.
members = Member.query.all()
# Create the choices list for the compare form.
choices = [(member.get_id(), member.get_full_name()) for member in members]
# Create the form.
compare_form = CompareMemberForm()
compare_form.compare_member_1.choices = choices
compare_form.compare_member_2.choices = choices
# Display the exercise home page passing in the forms and recent data etc.
return render_template('index.html', add_running_form=add_running_form, add_swimming_form=add_swimming_form, add_cycling_form=add_cycling_form, exercise_data=exercise_data,compare_form=compare_form)
@exercise_blueprint.route('/add_running', methods=['GET','POST'])
@login_required
@weight_required
def add_running():
"""
Displays a form for users to add running.
"""
# Create the running form.
add_running_form = AddRunningForm()
# Create empty message and error.
message = None
error = None
# Make sure the method was post.
if request.method == 'POST':
# Validate the form.
if add_running_form.validate_on_submit():
# Get the current time.
now = datetime.utcnow()
# Get all the posts made within the last minute.
last_posts = Exercise.query.filter_by(member=current_user).filter(Exercise.date > (now-timedelta(minutes=1))).all()
# Make sure they aren't cheating by logging more than 24 hours in one day,
# and that they haven't already added five posts in the last minute (i.e. they aren't rapidly clicking the button).
if (get_exercise_total(now) + float(add_running_form.duration.data) <= 24) and len(last_posts) < 5:
# Look Up the calories burned and commit it.
# Get users most recent weight.
user_weight = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc()).first().get_weight()
calories_burned = (float(RunningLookUp.query.filter_by(id=add_running_form.exercise_level.data).first().calories_burned)/80)*user_weight*float(add_running_form.duration.data)
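# (Assumption: the look-up table stores calories burned per hour for an 80 kg
#  reference person, hence the weight/80 scaling; e.g. a table value of 640
#  for a 70 kg user over 0.5 h gives 640/80 * 70 * 0.5 = 280 kcal.)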
# Add the exercise to the database.
db.session.add(Exercise(now, 'running', add_running_form.exercise_level.data, add_running_form.duration.data, calories_burned, current_user.get_id()))
# Commit the changes.
db.session.commit()
# Flash a success message.
flash("Exercise successfully added.")
# Add a well done message.
message = "Well Done you burned "+str(calories_burned)+" calories in that session."
else:
# Make the correct error message.
flash("An error occurred adding that exercise.",'error')
if (get_exercise_total(now) + float(add_running_form.duration.data) > 24):
error = "Exercise has not been added as the current total for today exceeds 24 hours."
else:
error = "You have tried to add too many events in the last minute, please wait then try again."
# Get the last 4 exercises for running.
running_data = Exercise.query.filter_by(exercise_type='running',member=current_user).order_by(Exercise.id.desc()).limit(4).all()
# Display the add running page.
return render_template('add_running.html', add_running_form=add_running_form, message=message,error=error,running_data=running_data)
@exercise_blueprint.route('/add_cycling', methods=['GET','POST'])
@login_required
@weight_required
def add_cycling():
"""
Displays a form for users to add cycling.
"""
# Create empty message and error.
message = None
error = None
# Create the cycling form.
add_cycling_form = AddCyclingForm()
# Make sure the method was post.
if request.method == 'POST':
# Validate the form.
if add_cycling_form.validate_on_submit():
# Get the current time.
now = datetime.utcnow()
# Get all the posts made within the last minute.
last_posts = Exercise.query.filter_by(member=current_user).filter(Exercise.date > (now-timedelta(minutes=1))).all()
# Make sure they aren't cheating by logging more than 24 hours in one day,
# and that they haven't already added five posts in the last minute (i.e. they aren't rapidly clicking the button).
if (get_exercise_total(now) + float(add_cycling_form.duration.data) <= 24) and len(last_posts) < 5:
# Look Up the calories burned and commit it.
# Get users most recent weight.
user_weight = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc()).first().get_weight()
calories_burned = (float(CyclingLookUp.query.filter_by(id=add_cycling_form.exercise_level.data).first().calories_burned)/80)*user_weight*float(add_cycling_form.duration.data)
# Add the exercise to the database.
db.session.add(Exercise(now, 'cycling', add_cycling_form.exercise_level.data, add_cycling_form.duration.data, calories_burned, current_user.get_id()))
# Commit the changes.
db.session.commit()
# Flash a success message.
flash("Exercise successfully added.")
# Add a well done message.
message = "Well Done you burned "+str(calories_burned)+" calories in that session."
else:
# Make the correct error message.
flash("An error occurred adding that exercise.",'error')
if (get_exercise_total(now) + float(add_cycling_form.duration.data) > 24):
error = "Exercise has not been added as the current total for today exceeds 24 hours."
else:
error = "You have tried to add too many events in the last minute, please wait then try again."
# Get the last 4 exercises for running.
cycling_data = Exercise.query.filter_by(exercise_type='cycling',member=current_user).order_by(Exercise.id.desc()).limit(4).all()
# Display the add cycling page.
return render_template('add_cycling.html', add_cycling_form=add_cycling_form, message=message,error=error, cycling_data=cycling_data)
@exercise_blueprint.route('/add_swimming', methods=['GET','POST'])
@login_required
@weight_required
def add_swimming():
"""
Displays a form for users to add swimming.
"""
# Create empty message and error.
message = None
error = None
# Create the swimming form.
add_swimming_form = AddSwimmingForm()
# Make sure the method was post.
if request.method == 'POST':
# Validate the form.
if add_swimming_form.validate_on_submit():
# Get the current time.
now = datetime.utcnow()
# Get all the posts made within the last minute.
last_posts = Exercise.query.filter_by(member=current_user).filter(Exercise.date > (now-timedelta(minutes=1))).all()
# Make sure they aren't cheating by logging more than 24 hours in one day,
# and that they haven't already added five posts in the last minute (i.e. they aren't rapidly clicking the button).
if (get_exercise_total(now) + float(add_swimming_form.duration.data) <= 24) and len(last_posts) < 5:
# Look Up the calories burned and commit it.
# Get users most recent weight.
user_weight = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc()).first().get_weight()
calories_burned = (float(SwimmingLookUp.query.filter_by(id=add_swimming_form.exercise_level.data).first().calories_burned)/80)*user_weight*float(add_swimming_form.duration.data)
# Add the exercise to the database.
db.session.add(Exercise(now, 'swimming', add_swimming_form.exercise_level.data, add_swimming_form.duration.data, calories_burned, current_user.get_id()))
# Commit the changes.
db.session.commit()
# Flash a success message.
flash("Exercise successfully added.")
# Add a well done message.
message = "Well Done you burned "+str(calories_burned)+" calories in that session."
else:
# Make the correct error message.
flash("An error occurred adding that exercise.",'error')
if (get_exercise_total(now) + float(add_swimming_form.duration.data) > 24):
error = "Exercise has not been added as the current total for today exceeds 24 hours."
else:
error = "You have tried to add too many events in the last minute, please wait then try again."
# Get the last 4 exercises for running.
swimming_data = Exercise.query.filter_by(exercise_type='swimming',member=current_user).order_by(Exercise.id.desc()).limit(4).all()
# Display the add swimming page.
return render_template('add_swimming.html', add_swimming_form=add_swimming_form, message=message,error=error, swimming_data=swimming_data)
@exercise_blueprint.route('/view')
@login_required
def view():
"""
Page to display a table of all the users exercise.
It allows users to then click on specific events,
which can then be viewed with view_exercise
"""
# Select the exercise data for the current member.
all_exercise_data = Exercise.query.filter_by(member=current_user).order_by(Exercise.id.desc()).all()
# Display the view page passing in all the exercise data.
return render_template('view.html',all_exercise_data=all_exercise_data,member=current_user)
@exercise_blueprint.route('/view/<exercise_id>')
@login_required
def view_exercise(exercise_id):
"""
Page to display a single exercise event.
Displays the event with the id = exercise_id
"""
# Get the exercise object with the given id.
exercise = Exercise.query.filter_by(id=exercise_id).first()
if exercise is not None:
# Create the edit exercise form.
edit_exercise_form = EditExerciseForm()
if exercise.member != current_user:
# If you are viewing another users exercise.
db.session.add(Message(datetime.utcnow(), current_user.get_full_name()+" Viewed your exercise", exercise.member.get_id()))
# Commit the changes.
db.session.commit()
# Get all of the exercise for the member of the given exercise.
all_exercise_data = Exercise.query.filter_by(member=exercise.member).order_by(Exercise.id.desc()).all()
else:
# The exercise ID is invalid abort with HTTP 404
abort(404)
# Display the view page for a specific exercise event.
return render_template('view.html',all_exercise_data=all_exercise_data,exercise=exercise,member=exercise.member,edit_exercise_form=edit_exercise_form)
@exercise_blueprint.route('/edit_exercise', methods=['POST','GET'])
@login_required
@weight_required
def edit_exercise():
"""
Allows users to edit their exercise.
"""
# Create the edit exercise form.
edit_exercise_form = EditExerciseForm()
if request.method=='POST' and edit_exercise_form.validate_on_submit():
# The method was post and the form was valid.
# Get the exercise object.
exercise = Exercise.query.filter_by(id=edit_exercise_form.exercise_id.data).first()
# Check the exercise is for the current user.
if exercise.member == current_user:
# OK lets run the update.
# See if they want us to delete it.
if edit_exercise_form.delete.data:
# Delete that exercise.
db.session.delete(exercise)
db.session.commit()
flash("Exercise has been deleted.")
# Send back to all the exercise as this event won't exist anymore.
return redirect(url_for('exercise.view'))
else:
# Calculate the new calories burned.
# (We deliberately don't recalculate using the user's current weight, since the exercise
# may have been logged when their weight was different; only the duration changes, so the
# calories burned are scaled proportionally.)
new_calories_burned = (exercise.calories_burned/exercise.exercise_duration)*float(edit_exercise_form.duration.data)
# Update the duration.
exercise.update_duration(float(edit_exercise_form.duration.data), new_calories_burned)
flash("Exercise has been updated.")
# Send them back to where they came from.
return redirect(request.referrer or url_for('exercise.index'))
@exercise_blueprint.route('/compare',methods=['POST','GET'])
@login_required
def compare():
"""
Page to compare two users.
"""
# Get all the current members.
members = Member.query.all()
# Create the choices list for the compare form.
choices = [(member.get_id(), member.get_full_name()+' (id='+str(member.get_id())+')') for member in members]
# Create the form.
compare_form = CompareMemberForm()
compare_form.compare_member_1.choices = choices
compare_form.compare_member_2.choices = choices
# Make sure the method was post.
if request.method == 'POST':
# Validate the form.
if compare_form.validate_on_submit():
# Get data from the compare form.
# Get the member objects for both of the members selected on the form.
compare_member_1 = Member.query.filter_by(id=compare_form.compare_member_1.data).first()
compare_member_2 = Member.query.filter_by(id=compare_form.compare_member_2.data).first()
# Get the current UTC time.
now = datetime.utcnow()
# Create compare data for member 1.
compare_member_1_data = {
"name":compare_member_1.get_full_name(),
"total_time":get_exercise_total(datetime(now.year,1,1),member=compare_member_1),
"total_cals":get_cals_total(datetime(now.year,1,1),member=compare_member_1),
"running_time":get_hours_running(member=compare_member_1),
"running_cals":get_cals_running(member=compare_member_1),
"cycling_time":get_hours_cycling(member=compare_member_1),
"cycling_cals":get_cals_cycling(member=compare_member_1),
"swimming_time":get_hours_swimming(member=compare_member_1),
"swimming_cals":get_cals_swimming(member=compare_member_1),
}
# Create compare data for member 2.
compare_member_2_data = {
"name":compare_member_2.get_full_name(),
"total_time":get_exercise_total(datetime(now.year,1,1),member=compare_member_2),
"total_cals":get_cals_total(datetime(now.year,1,1),member=compare_member_2),
"running_time":get_hours_running(member=compare_member_2),
"running_cals":get_cals_running(member=compare_member_2),
"cycling_time":get_hours_cycling(member=compare_member_2),
"cycling_cals":get_cals_cycling(member=compare_member_2),
"swimming_time":get_hours_swimming(member=compare_member_2),
"swimming_cals":get_cals_swimming(member=compare_member_2),
}
# Get most recent exercise for the charts
compare_member_1_exercise = Exercise.query.filter_by(member=compare_member_1).order_by(Exercise.id.desc()).limit(5).all()
compare_member_2_exercise = Exercise.query.filter_by(member=compare_member_2).order_by(Exercise.id.desc()).limit(5).all()
# Chart data for time
chart_data_time_1 = [ exercise.exercise_duration for exercise in compare_member_1_exercise][::-1]
chart_data_time_2 = [ exercise.exercise_duration for exercise in compare_member_2_exercise][::-1]
# Chart data for calories
chart_data_calories_1 = [ exercise.calories_burned for exercise in compare_member_1_exercise][::-1]
chart_data_calories_2 = [ exercise.calories_burned for exercise in compare_member_2_exercise][::-1]
return render_template('compare.html',compare_member_1_data=compare_member_1_data,compare_member_2_data=compare_member_2_data, compare_form=compare_form,chart_data_time_1=chart_data_time_1,chart_data_time_2=chart_data_time_2,chart_data_calories_1=chart_data_calories_1,chart_data_calories_2=chart_data_calories_2)
# Display the compare page.
return render_template('compare.html', compare_form=compare_form)
@exercise_blueprint.route('/picktheteam')
@login_required
def picktheteam():
"""
Page to display the team of eight runners.
"""
# Get all of the members in the database.
members = Member.query.all()
# Create a datetime object for the start of this year.
# (Named year_start so it doesn't shadow the imported datetime.date.)
year_start = datetime(datetime.utcnow().year, 1, 1)
# Get url argument to see if we need to display all the members or just the top 8.
if request.args.get('all') == "true":
page_title="All Members"
pick_team=False
else:
page_title="Pick the Team"
pick_team=True
# Get url argument to see if we are ordering by calories_burned or total hours exercised.
if request.args.get('order_by') == "hours":
order_by = 2
else:
order_by = 1
# Create a new list for the ordered members to be stored in.
members_ordered=[]
# For each member.
for member in members:
# Calculate the total calories burned for that member this year.
calories_burned = get_cals_total(date=year_start, member=member)
# Calculate the total hours exercised for that member this year.
hours_exercised = get_exercise_total(date=year_start, member=member)
# Add a tuple of the member, calories burned and hours exercised to the list.
members_ordered.append((member, calories_burned, hours_exercised))
# Order the list by the chosen element (calories burned or hours exercised),
# reversing it since sorted() returns ascending order.
members_ordered = sorted(members_ordered, key=itemgetter(order_by))[::-1]
# Display the page to pick the team.
return render_template("exercise_picktheteam.html", page_title=page_title,pick_team=pick_team, members_ordered=members_ordered)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Tests for `magic_dot` package."""
import pytest
from collections import namedtuple
from magic_dot import MagicDot
from magic_dot import NOT_FOUND
from magic_dot.exceptions import NotFound
def test_can():
"""Test that dict key is accessible as a hash ."""
md = MagicDot({"num": 1})
assert md.num.get() == 1
def test_yet():
"""Test NOT_FOUND is returned."""
md = MagicDot({"num": 1})
assert md.buba.get() is NOT_FOUND
def test_other():
"""Test supplied default is returned for NOT_FOUND"""
md = MagicDot({"num": 1})
assert md.bubba.get("something") == "something"
def test_coat():
"""Test that attributes are extracted first."""
class AttrKey(dict):
a = 7
ak = AttrKey()
ak['a'] = 8
md = MagicDot(ak)
assert md.a.get() == 7
def test_ride():
"""Test that indexed processing happens by default."""
nt = namedtuple("NT", "x")(1)
md = MagicDot([nt, None, nt])
assert md[1].get() is None
def test_both():
"""Test that exception is enabled with init."""
md = MagicDot({}, exception=True)
with pytest.raises(NotFound):
md.nonexistent.get()
def test_been():
"""Test that exception is enabled with exception."""
md = MagicDot({})
with pytest.raises(NotFound):
md.exception().nonexistent
def test_curve():
"""Test that exception does not affect the get after NOT_FOUND is detected."""
md = MagicDot({})
md.nonexistent.exception().get()
def test_pie():
"""Test that TypeError is raised when iterating over non-data"""
md = MagicDot(1)
with pytest.raises(TypeError):
[x for x in md]
def test_cat():
"""Tests that TypeError is raised for valid non-iterable when iter_nf_as_empty() is set"""
md = MagicDot(1, iter_nf_as_empty=True)
with pytest.raises(TypeError):
[x for x in md]
def test_atom():
"""Tests that TypeError is raised for NOT_FOUND by default"""
md = MagicDot(1).nonexistent
with pytest.raises(TypeError):
[x for x in md]
def test_lesson():
"""Tests that NOT_FOUND returns empty generator with iter_nf_as_empty"""
md = MagicDot(1, iter_nf_as_empty=True).nonexistent
assert [x for x in md] == []
def test_layers():
"""Tests that NOT_FOUND returns empty generator with iter_nf_as_empty()"""
md = MagicDot(1).nonexistent.iter_nf_as_empty()
assert [x for x in md] == []
def test_trace():
"""Tests ability to walk iterable data."""
md = MagicDot([None, 1, 2])
expected = [None, 1, 2]
for x in md:
assert x.get() == expected.pop(0)
def test_sign():
"""Tests pluck of attributes and nonexistent data."""
nt = namedtuple("NT", "x")(1)
md = MagicDot([nt, None, nt])
assert md.pluck("x").get() == [1, NOT_FOUND, 1]
def test_money():
"""Tests pluck of keys and nonexistent data."""
d = {"x": 1}
md = MagicDot([d, None, d])
assert md.pluck("x").get() == [1, NOT_FOUND, 1]
def test_whistle():
"""Tests pluck of nonexistent data raises TypeError"""
md = MagicDot(1)
with pytest.raises(TypeError):
md.nonexistent.pluck('z')
def test_neighborhood():
"""Tests that pluck of nonexistent data with .iter_nf_as_empty returns empty."""
md = MagicDot(1)
assert md.nonexistent.iter_nf_as_empty().pluck('whatevs').get() == []
def test_vote():
"""Tests that pluck of noniterable gives type_error"""
md = MagicDot(1)
with pytest.raises(TypeError):
md.pluck('z')
def test_vote_iter_nf_as_empty():
"""Tests that pluck of noniterable gives type_error even if .iter_nf_as_empty is set."""
md = MagicDot(1)
with pytest.raises(TypeError):
md.iter_nf_as_empty().pluck('z')
def test_yellow():
"""Test that a pluck of NOT_FOUND data raises an NotFound exception if .exception is set"""
nt = namedtuple("NT", "x")(1)
md = MagicDot([nt, None, nt])
with pytest.raises(NotFound):
md.exception().pluck("x")
def test_supply():
"""Test that boolean math is not allowed with magic_dot."""
md = MagicDot(1)
with pytest.raises(RuntimeError):
not md
def test_important():
"""Test that boolean math is not allowed on NOT_FOUND"""
md = MagicDot(1)
with pytest.raises(RuntimeError):
not md.nonexistent.get()
def test_in():
"""Test that repr for NOT_FOUND works nicely (for documentation)."""
md = MagicDot(1)
assert repr(md.nonexistent.get()) == "magic_dot.NOT_FOUND"
def test_gate():
"""Test that setting exception creates a new md"""
md = MagicDot(1)
assert md is not md.exception()
def test_bowl():
"""Test that setting exception twice does note create a new md"""
md = MagicDot(1, exception=True)
assert md is md.exception()
def test_solve():
"""Test that setting iter_nf_as_empty creates a new md"""
md = MagicDot(1)
assert md is not md.iter_nf_as_empty()
def test_reader():
"""Test that setting iter_nf_as_empty twice does note create a new md"""
md = MagicDot(1, iter_nf_as_empty=True)
assert md is md.iter_nf_as_empty()
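# Hedged quick reference (behaviour as exercised by the tests above):
#   md = MagicDot({"a": 1})
#   md.a.get()          -> 1
#   md.missing.get()    -> magic_dot.NOT_FOUND
#   md.missing.get(0)   -> 0 (supplied default)
#   MagicDot({}, exception=True).missing  -> raises NotFound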
|
nilq/baby-python
|
python
|
message = 'This is submodule 1.'
def module_testing():
print(message)
|
nilq/baby-python
|
python
|
# SISO program G.py
# This function is a placeholder for a generic computable function G.
# This particular choice of G returns the first character of the input
# string.
import utils
from utils import rf
def G(inString):
if len(inString) >= 1:
return inString[0]
else:
return ""
def testG():
testvals = [
("", ""),
("x", "x"),
("abcdef", "a"),
]
for (inString, solution) in testvals:
val = G(inString)
utils.tprint(inString, ":", val)
assert val == solution
|
nilq/baby-python
|
python
|
'''
LICENSE: MIT
https://github.com/keras-team/keras/blob/a07253d8269e1b750f0a64767cc9a07da8a3b7ea/LICENSE
Experiment notes
- Removed Dropout, but saw little change.
- Switching to SGD made convergence much slower.
  Interesting.
- I have an idea I want to try, so I'll write my own
  activation function.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import optimizers
from keras.layers import Activation
from keras import backend
from keras.utils.generic_utils import get_custom_objects
smoothing = 0
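# NOTE: placeholder value; the real smoothing schedule is set in the training
# loop below, and every activation layer is rebuilt with the current value
# before the model is compiled and fitted.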
def custom_activation(x):
return smoothing * backend.tanh(x / smoothing)
def replace_intermediate_layer_in_keras(model, layer_id, new_layer):
from keras.models import Model
layers = list(model.layers)
x = layers[0].output
for i in range(1, len(layers)):
if i == layer_id:
x = new_layer(x)
else:
x = layers[i](x)
new_model = Model(inputs=model.input, outputs=x)
return new_model
batch_size = 128
num_classes = 10
epochs = 20
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='linear', input_shape=(784,)))
model.add(Activation(custom_activation))
# model.add(Dropout(0.2))
model.add(Dense(512, activation='linear'))
model.add(Activation(custom_activation))
# model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='linear'))
model.add(Activation(custom_activation))
# model.add(Dense(num_classes, activation='softmax'))
model.summary()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
rms_prop = optimizers.RMSprop()
# model.compile(loss='categorical_crossentropy',
# optimizer=sgd,
# metrics=['accuracy'])
# Rebuild the Sequential model as a functional Model with an explicit input layer.
model = replace_intermediate_layer_in_keras(model, 1, Activation(custom_activation))
for i in range(5):
smoothing = 0.01 * 1e2**(1.0 * (4 - i) / 4)
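# Anneal the smoothing geometrically from 1.0 (i=0) down to 0.01 (i=4).
# The activation s * tanh(x / s) is roughly linear for |x| << s and saturates
# at +/- s, so shrinking s sharpens the clipping as training progresses.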
model = replace_intermediate_layer_in_keras(model, 2, Activation(custom_activation))
# model.summary()
model = replace_intermediate_layer_in_keras(model, 4, Activation(custom_activation))
# model.summary()
model = replace_intermediate_layer_in_keras(model, 6, Activation(custom_activation))
# model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=1,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
nilq/baby-python
|
python
|
from .tracebackturbo import *
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
import csv
import io
import json
import os
import string
from collections import OrderedDict
from unittest import TestCase
import pandas as pd
from backports.tempfile import TemporaryDirectory
from tempfile import NamedTemporaryFile
from hypothesis import (
given,
HealthCheck,
reproduce_failure,
settings,
)
from hypothesis.strategies import (
dictionaries,
integers,
just,
lists,
text,
tuples,
)
from mock import patch, Mock
from oasislmf.exposures.manager import OasisExposuresManager
from oasislmf.exposures.pipeline import OasisFilesPipeline
from oasislmf.utils.coverage import (
BUILDING_COVERAGE_CODE,
CONTENTS_COVERAGE_CODE,
OTHER_STRUCTURES_COVERAGE_CODE,
TIME_COVERAGE_CODE,
)
from oasislmf.utils.exceptions import OasisException
from oasislmf.utils.status import (
KEYS_STATUS_FAIL,
KEYS_STATUS_NOMATCH,
KEYS_STATUS_SUCCESS,
)
from ..models.fakes import fake_model
from tests import (
canonical_exposure_data,
keys_data,
write_input_files,
)
class OasisExposureManagerAddModel(TestCase):
def test_models_is_empty___model_is_added_to_model_dict(self):
model = fake_model('supplier', 'model', 'version')
manager = OasisExposuresManager()
manager.add_model(model)
self.assertEqual({model.key: model}, manager.models)
def test_manager_already_contains_a_model_with_the_given_key___model_is_replaced_in_models_dict(self):
first = fake_model('supplier', 'model', 'version')
second = fake_model('supplier', 'model', 'version')
manager = OasisExposuresManager(oasis_models=[first])
manager.add_model(second)
self.assertIs(second, manager.models[second.key])
def test_manager_already_contains_a_different_model___model_is_added_to_dict(self):
first = fake_model('first', 'model', 'version')
second = fake_model('second', 'model', 'version')
manager = OasisExposuresManager(oasis_models=[first])
manager.add_model(second)
self.assertEqual({
first.key: first,
second.key: second,
}, manager.models)
class OasisExposureManagerDeleteModels(TestCase):
def test_models_is_not_in_manager___no_model_is_removed(self):
manager = OasisExposuresManager([
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
])
expected = manager.models
manager.delete_models([fake_model('supplier3', 'model3', 'version3')])
self.assertEqual(expected, manager.models)
def test_models_exist_in_manager___models_are_removed(self):
models = [
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
fake_model('supplier3', 'model3', 'version3'),
]
manager = OasisExposuresManager(models)
manager.delete_models(models[1:])
self.assertEqual({models[0].key: models[0]}, manager.models)
class OasisExposureManagerLoadCanonicalExposuresProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_none(self):
profile = OasisExposuresManager().load_canonical_exposures_profile()
self.assertEqual(None, profile)
@given(dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_exposures_profile_json': json.dumps(expected)})
profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposures_profile'])
@given(dictionaries(text(), text()), dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_exposures_profile_json': json.dumps(model_profile)})
profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model, canonical_exposures_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposures_profile'])
@given(dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_exposures_profile_json_path': f.name})
profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposures_profile'])
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(dictionaries(text(), text()), dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_exposures_profile_json_path': model_file.name})
profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model, canonical_exposures_profile_json_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposures_profile'])
class OasisExposureManagerGetKeys(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_errors_file_path',
model_exposures_file_path='model_exposures_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposures_file_path = model_exposures_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposures_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_supplied_kwargs_are_not___lookup_keys_files_and_exposures_file_from_model_are_used(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposures_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposures_file_path=exposures_file_path)
with patch('oasislmf.exposures.manager.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = OasisExposuresManager().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposures_fp=exposures_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(min_size=1, alphabet=string.ascii_letters),
model_keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
model_exposures_fp=text(min_size=1, alphabet=string.ascii_letters),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
exposures_fp=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_and_kwargs_are_supplied___lookup_keys_files_and_exposures_file_from_kwargs_are_used(
self,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposures_fp,
lookup,
keys_fp,
keys_errors_fp,
exposures_fp
):
model = self.create_model(lookup=model_lookup, keys_file_path=model_keys_fp, keys_errors_file_path=model_keys_errors_fp, model_exposures_file_path=model_exposures_fp)
with patch('oasislmf.exposures.manager.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = OasisExposuresManager().get_keys(
oasis_model=model,
lookup=lookup,
model_exposures_file_path=exposures_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposures_fp=exposures_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
class OasisExposureManagerLoadMasterDataframe(TestCase):
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
profile_element_name=text(alphabet=string.ascii_letters, min_size=1),
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_row_in_keys_data_is_missing_from_exposure_data___oasis_exception_is_raised(
self,
profile_element_name,
keys,
exposures
):
matching_exposures = [e for e in exposures if e[0] in map(lambda k: k['id'], keys)]
exposures.pop(exposures.index(matching_exposures[0]))
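# Dropping one matching exposure row guarantees a keys record with no
# corresponding exposure, which load_master_data_frame should reject.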
profile = {
profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file:
write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name)
with self.assertRaises(OasisException):
OasisExposuresManager().load_master_data_frame(exposures_file.name, keys_file.name, profile)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
profile_element_name=text(alphabet=string.ascii_letters, min_size=1),
keys=keys_data(from_coverage_type_ids=just(CONTENTS_COVERAGE_CODE), from_statuses=just(KEYS_STATUS_SUCCESS), size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_canonical_profile_coverage_types_dont_match_model_defined_coverage_types___oasis_exception_is_raised(
self,
profile_element_name,
keys,
exposures
):
matching_exposures = [e for e in exposures if e[0] in map(lambda k: k['id'], keys)]
exposures.pop(exposures.index(matching_exposures[0]))
profile = {
profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': BUILDING_COVERAGE_CODE}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file:
write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name)
with self.assertRaises(OasisException):
OasisExposuresManager().load_master_data_frame(exposures_file.name, keys_file.name, profile)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
profile_element_name=text(alphabet=string.ascii_letters, min_size=1),
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), size=10),
exposures=canonical_exposure_data(num_rows=10, min_value=1)
)
def test_each_row_has_a_single_row_per_element_with_each_row_having_a_positive_value_for_the_profile_element___each_row_is_present(
self,
profile_element_name,
keys,
exposures
):
profile = {
profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
expected = []
keys_values_tuples = map(lambda li: tuple(filter(lambda v: type(v) == int, li)), [k.values() for k in keys])
for i, zipped_data in enumerate(zip(keys_values_tuples, exposures)):
expected.append((
i + 1,
zipped_data[0],
zipped_data[1][1],
))
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file:
write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name)
result = OasisExposuresManager().load_master_data_frame(
exposures_file.name,
keys_file.name,
profile,
)
self.assertEqual(len(expected), len(result))
for i in range(len(result)):
row = {k:(int(v) if k != 'tiv' else v) for k, v in result.iloc[i].to_dict().items()}
self.assertEqual(i + 1, row['item_id'])
self.assertEqual(i + 1, row['coverage_id'])
self.assertEqual(exposures[i][1], row['tiv'])
self.assertEqual(keys[i]['area_peril_id'], row['areaperil_id'])
self.assertEqual(keys[i]['vulnerability_id'], row['vulnerability_id'])
self.assertEqual(i + 1, row['group_id'])
self.assertEqual(1, row['summary_id'])
self.assertEqual(1, row['summaryset_id'])
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
profile_element_name=text(alphabet=string.ascii_letters, min_size=1),
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), size=10),
exposures=canonical_exposure_data(num_rows=10, min_value=1)
)
def test_each_row_has_a_single_row_per_element_with_each_row_having_any_value_for_the_profile_element___rows_with_profile_elements_gt_0_are_present(
self,
profile_element_name,
keys,
exposures
):
profile = {
profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
expected = []
keys_values_tuples = map(lambda li: tuple(filter(lambda v: type(v) == int, li)), [k.values() for k in keys])
row_id = 0
for zipped_keys, zipped_exposure in zip(keys_values_tuples, exposures):
if zipped_exposure[1] > 0:
row_id += 1
expected.append((
row_id,
zipped_keys,
zipped_exposure[1],
))
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file:
write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name)
result = OasisExposuresManager().load_master_data_frame(
exposures_file.name,
keys_file.name,
profile,
)
self.assertEqual(len(expected), len(result))
for i in range(len(result)):
row = {k:(int(v) if k != 'tiv' else v) for k, v in result.iloc[i].to_dict().items()}
self.assertEqual(i + 1, row['item_id'])
self.assertEqual(i + 1, row['coverage_id'])
self.assertEqual(exposures[i][1], row['tiv'])
self.assertEqual(keys[i]['area_peril_id'], row['areaperil_id'])
self.assertEqual(keys[i]['vulnerability_id'], row['vulnerability_id'])
self.assertEqual(i + 1, row['group_id'])
self.assertEqual(1, row['summary_id'])
self.assertEqual(1, row['summaryset_id'])
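# Hedged summary of the master data frame asserted above: one row per matched
# exposure with a positive TIV, with columns item_id, coverage_id, tiv,
# areaperil_id, vulnerability_id, group_id, summary_id and summaryset_id.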
class FileGenerationTestCase(TestCase):
def setUp(self):
self.items_filename = 'items.csv'
self.coverages_filename = 'coverages.csv'
self.gulsummaryxref_filename = 'gulsummaryxref.csv'
def check_items_file(self, keys, out_dir):
expected = [
{
'item_id': i + 1,
'coverage_id': i + 1,
'areaperil_id': key['area_peril_id'],
'vulnerability_id': key['vulnerability_id'],
'group_id': i + 1,
} for i, key in enumerate(keys)
]
with io.open(os.path.join(out_dir, self.items_filename), 'r', encoding='utf-8') as f:
result = list(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_coverages_file(self, exposures, out_dir):
expected = [
{
'coverage_id': item_id + 1,
'tiv': item[1],
} for item_id, item in enumerate(exposures)
]
with io.open(os.path.join(out_dir, self.coverages_filename), 'r', encoding='utf-8') as f:
result = list(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_gul_file(self, exposures, out_dir):
expected = [
{
'coverage_id': item_id + 1,
'summary_id': 1,
'summaryset_id': 1,
} for item_id in range(len(exposures))
]
with io.open(os.path.join(out_dir, self.gulsummaryxref_filename), 'r', encoding='utf-8') as f:
result = list(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
class OasisExposuresManagerGenerateItemsFile(FileGenerationTestCase):
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model(resources={'canonical_exposures_profile': profile})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name
model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name
model.resources['oasis_files_pipeline'].items_file_path = os.path.join(out_dir, self.items_filename)
OasisExposuresManager().generate_items_file(oasis_model=model)
self.check_items_file(keys, out_dir)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model()
OasisExposuresManager().generate_items_file(
oasis_model=model,
canonical_exposures_profile=profile,
keys_file_path=keys_file.name,
canonical_exposures_file_path=exposures_file.name,
items_file_path=os.path.join(out_dir, self.items_filename)
)
self.check_items_file(keys, out_dir)
class OasisExposuresManagerGenerateCoveragesFile(FileGenerationTestCase):
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model(resources={'canonical_exposures_profile': profile})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name
model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name
model.resources['oasis_files_pipeline'].coverages_file_path = os.path.join(out_dir, self.coverages_filename)
OasisExposuresManager().generate_coverages_file(oasis_model=model)
self.check_coverages_file(exposures, out_dir)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model()
OasisExposuresManager().generate_coverages_file(
oasis_model=model,
canonical_exposures_profile=profile,
keys_file_path=keys_file.name,
canonical_exposures_file_path=exposures_file.name,
coverages_file_path=os.path.join(out_dir, self.coverages_filename)
)
self.check_coverages_file(exposures, out_dir)
class OasisExposuresManagerGenerateGulsummaryxrefFile(FileGenerationTestCase):
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model(resources={'canonical_exposures_profile': profile})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name
model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name
model.resources['oasis_files_pipeline'].gulsummaryxref_file_path = os.path.join(out_dir, self.gulsummaryxref_filename)
OasisExposuresManager().generate_gulsummaryxref_file(oasis_model=model)
self.check_gul_file(exposures, out_dir)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model()
OasisExposuresManager().generate_gulsummaryxref_file(
oasis_model=model,
canonical_exposures_profile=profile,
keys_file_path=keys_file.name,
canonical_exposures_file_path=exposures_file.name,
gulsummaryxref_file_path=os.path.join(out_dir, self.gulsummaryxref_filename)
)
self.check_gul_file(exposures, out_dir)
class OasisExposuresManagerGenerateOasisFiles(FileGenerationTestCase):
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model(resources={'canonical_exposures_profile': profile})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name
model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name
model.resources['oasis_files_pipeline'].items_file_path = os.path.join(out_dir, self.items_filename)
model.resources['oasis_files_pipeline'].coverages_file_path = os.path.join(out_dir, self.coverages_filename)
model.resources['oasis_files_pipeline'].gulsummaryxref_file_path = os.path.join(out_dir, self.gulsummaryxref_filename)
OasisExposuresManager().generate_oasis_files(oasis_model=model)
self.check_items_file(keys, out_dir)
self.check_coverages_file(exposures, out_dir)
self.check_gul_file(exposures, out_dir)
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10),
exposures=canonical_exposure_data(10, min_value=1)
)
def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures):
profile = {
'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1}
}
with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir:
write_input_files(keys, keys_file.name, exposures, exposures_file.name)
model = fake_model()
OasisExposuresManager().generate_oasis_files(
oasis_model=model,
canonical_exposures_profile=profile,
keys_file_path=keys_file.name,
canonical_exposures_file_path=exposures_file.name,
items_file_path=os.path.join(out_dir, self.items_filename),
coverages_file_path=os.path.join(out_dir, self.coverages_filename),
gulsummaryxref_file_path=os.path.join(out_dir, self.gulsummaryxref_filename)
)
self.check_items_file(keys, out_dir)
self.check_coverages_file(exposures, out_dir)
self.check_gul_file(exposures, out_dir)
class OasisExposuresTransformSourceToCanonical(TestCase):
@given(
source_exposures_file_path=text(),
source_to_canonical_exposures_transformation_file_path=text(),
source_exposures_validation_file_path=text(),
canonical_exposures_file_path=text()
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
source_exposures_file_path,
source_to_canonical_exposures_transformation_file_path,
source_exposures_validation_file_path,
canonical_exposures_file_path
):
trans_call_mock = Mock()
with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
OasisExposuresManager().transform_source_to_canonical(
source_exposures_file_path=source_exposures_file_path,
source_exposures_validation_file_path=source_exposures_validation_file_path,
source_to_canonical_exposures_transformation_file_path=source_to_canonical_exposures_transformation_file_path,
canonical_exposures_file_path=canonical_exposures_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposures_file_path),
os.path.abspath(canonical_exposures_file_path),
os.path.abspath(source_to_canonical_exposures_transformation_file_path),
os.path.abspath(source_exposures_validation_file_path),
append_row_nums=True,
)
trans_call_mock.assert_called_once_with()
@given(
source_exposures_file_path=text(),
source_exposures_validation_file_path=text(),
source_to_canonical_exposures_transformation_file_path=text(),
canonical_exposures_file_path=text()
)
def test_model_is_set___parameters_are_taken_from_model(
self,
source_exposures_file_path,
source_to_canonical_exposures_transformation_file_path,
source_exposures_validation_file_path,
canonical_exposures_file_path):
model = fake_model(resources={
'source_exposures_file_path': source_exposures_file_path,
'source_exposures_validation_file_path': source_exposures_validation_file_path,
'source_to_canonical_exposures_transformation_file_path': source_to_canonical_exposures_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposures_path = canonical_exposures_file_path
trans_call_mock = Mock()
with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
OasisExposuresManager().transform_source_to_canonical(
source_exposures_file_path=source_exposures_file_path,
source_to_canonical_exposures_transformation_file_path=source_to_canonical_exposures_transformation_file_path,
source_exposures_validation_file_path=source_exposures_validation_file_path,
canonical_exposures_file_path=canonical_exposures_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposures_file_path),
os.path.abspath(canonical_exposures_file_path),
os.path.abspath(source_to_canonical_exposures_transformation_file_path),
os.path.abspath(source_exposures_validation_file_path),
append_row_nums=True,
)
trans_call_mock.assert_called_once_with()
class OasisExposuresTransformCanonicalToModel(TestCase):
@given(
canonical_exposures_file_path=text(),
canonical_exposures_validation_file_path=text(),
canonical_to_model_exposures_transformation_file_path=text(),
model_exposures_file_path=text()
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
canonical_exposures_file_path,
canonical_to_model_exposures_transformation_file_path,
canonical_exposures_validation_file_path,
model_exposures_file_path):
trans_call_mock = Mock()
with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
OasisExposuresManager().transform_canonical_to_model(
canonical_exposures_file_path=canonical_exposures_file_path,
canonical_to_model_exposures_transformation_file_path=canonical_to_model_exposures_transformation_file_path,
canonical_exposures_validation_file_path=canonical_exposures_validation_file_path,
model_exposures_file_path=model_exposures_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposures_file_path),
os.path.abspath(model_exposures_file_path),
os.path.abspath(canonical_to_model_exposures_transformation_file_path),
os.path.abspath(canonical_exposures_validation_file_path),
append_row_nums=False,
)
trans_call_mock.assert_called_once_with()
@given(
canonical_exposures_file_path=text(),
canonical_exposures_validation_file_path=text(),
canonical_to_model_exposures_transformation_file_path=text(),
model_exposures_file_path=text()
)
def test_model_is_set___parameters_are_taken_from_model(
self,
canonical_exposures_file_path,
canonical_to_model_exposures_transformation_file_path,
canonical_exposures_validation_file_path,
model_exposures_file_path):
model = fake_model(resources={
'canonical_exposures_validation_file_path': canonical_exposures_validation_file_path,
'canonical_to_model_exposures_transformation_file_path': canonical_to_model_exposures_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposures_path = canonical_exposures_file_path
model.resources['oasis_files_pipeline'].model_exposures_file_path = model_exposures_file_path
trans_call_mock = Mock()
with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
OasisExposuresManager().transform_canonical_to_model(
canonical_exposures_file_path=canonical_exposures_file_path,
canonical_exposures_validation_file_path=canonical_exposures_validation_file_path,
canonical_to_model_exposures_transformation_file_path=canonical_to_model_exposures_transformation_file_path,
model_exposures_file_path=model_exposures_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposures_file_path),
os.path.abspath(model_exposures_file_path),
os.path.abspath(canonical_to_model_exposures_transformation_file_path),
os.path.abspath(canonical_exposures_validation_file_path),
append_row_nums=False,
)
trans_call_mock.assert_called_once_with()
class OasisExposureManagerCreate(TestCase):
@given(supplier=text(), model_id=text(), version=text())
def test_supplier_model_and_version_are_supplied___correct_key_is_created(self, supplier, model_id, version):
model = fake_model(supplier=supplier, model=model_id, version=version)
self.assertEqual('{}/{}/{}'.format(supplier, model_id, version), model.key)
def test_oasis_file_path_is_given___path_is_stored_as_absolute_path(self):
model = fake_model(resources={'oasis_files_path': 'some_path'})
result = model.resources['oasis_files_path']
expected = os.path.abspath('some_path')
self.assertEqual(expected, result)
def test_oasis_file_path_is_not_given___path_is_abs_path_of_default(self):
model = fake_model()
result = model.resources['oasis_files_path']
expected = os.path.abspath(os.path.join('Files', model.key.replace('/', '-')))
self.assertEqual(expected, result)
def test_file_pipeline_is_not_supplied___default_pipeline_is_set(self):
model = fake_model()
pipeline = model.resources['oasis_files_pipeline']
self.assertIsInstance(pipeline, OasisFilesPipeline)
self.assertEqual(pipeline.model_key, model.key)
def test_file_pipeline_is_supplied___pipeline_is_unchanged(self):
pipeline = OasisFilesPipeline()
model = fake_model(resources={'oasis_files_pipeline': pipeline})
self.assertIs(pipeline, model.resources['oasis_files_pipeline'])
def test_pipeline_is_not_a_pipeline_instance___oasis_exception_is_raised(self):
class FakePipeline(object):
pass
pipeline = FakePipeline()
with self.assertRaises(OasisException):
fake_model(resources={'oasis_files_pipeline': pipeline})
    def test_canonical_exposures_profile_not_set___canonical_exposures_profile_is_none(self):
model = fake_model()
profile = model.resources['canonical_exposures_profile']
        self.assertIsNone(profile)
@given(expected=dictionaries(text(), text()))
def test_canonical_exposures_profile_json_set___canonical_exposures_profile_matches_json(self, expected):
model = fake_model(resources={'canonical_exposures_profile_json': json.dumps(expected)})
profile = model.resources['canonical_exposures_profile']
self.assertEqual(expected, profile)
@given(expected=dictionaries(text(), text()))
def test_canonical_exposures_profile_path_set___canonical_exposures_profile_matches_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_exposures_profile_json_path': f.name})
profile = model.resources['canonical_exposures_profile']
self.assertEqual(expected, profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(expected=dictionaries(text(), text()), new=dictionaries(text(), text()))
def test_canonical_exposures_profile_set___profile_is_not_updated(self, expected, new):
model = fake_model(resources={
'canonical_exposures_profile': expected,
'canonical_exposures_profile_json': json.dumps(new),
})
profile = model.resources['canonical_exposures_profile']
self.assertEqual(expected, profile)
|
nilq/baby-python
|
python
|
#coding=utf-8
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse
from aops.settings import INT_CHOICES, STATUS_CHOICES
from cmdb import signals
from cmdb.models.ip_record import IpRecord
from cmdb.models.physical_server import PhysicalServer
class Host(models.Model):
uuid = models.CharField(max_length=255,unique=True)
roles = models.CharField(max_length=255, null=True)
physical_server = models.ForeignKey(PhysicalServer, related_name='host_physical_server', null=True)
salt_id = models.CharField(max_length=255, null=True)
ip_record = models.ManyToManyField(IpRecord, related_name='host_ip_record', null=True)
operating_system = models.CharField(max_length=255, null=True)
os_version = models.CharField(max_length=255, null=True)
host_name = models.CharField(max_length=255, null=True)
processor = models.CharField(max_length=255, null=True)
memory = models.CharField(max_length=255, null=True)
harddisk = models.CharField(max_length=255, null=True)
comment = models.CharField(max_length=255, null=True)
status = models.IntegerField(editable=True, choices=STATUS_CHOICES, default=0)
is_run = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
is_virtual_machine = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
is_dynamic = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
is_deleted = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
create_time = models.DateTimeField(auto_now_add=True)
update_time = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'host'
ordering = ['-uuid']
app_label = 'cmdb'
def __unicode__(self):
return self.uuid
def search_name(self):
return '%s: %s # %s # %s # %s # %s # %s # %s' % (self.__class__.__name__, self.uuid, self.roles, self.physical_server.__unicode__(), self.salt_id, self.operating_system, self.os_version, self.host_name)
def get_absolute_url(self):
return reverse('cmdb:edit_host', args=[self.id])
    # Expose an items() method so fields can be iterated over in template tags
def items(self):
return [(field, field.value_to_string(self)) for field in Host._meta.fields]
def delete(self, *args, **kwargs):
super(Host, self).delete(*args, **kwargs)
    def save(self, *args, **kwargs):
        if self.id is not None:
            # Updating an existing record: fetch the stored row before saving.
            host = Host.objects.get(pk=self.id)
        else:
            # Creating a new record.
            print 'Alter'
        super(Host, self).save(*args, **kwargs)
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
from unittest import TestCase
from simstring.measure.cosine import CosineMeasure
class TestCosine(TestCase):
measure = CosineMeasure()
def test_min_feature_size(self):
self.assertEqual(self.measure.min_feature_size(5, 1.0), 5)
self.assertEqual(self.measure.min_feature_size(5, 0.5), 2)
def test_max_feature_size(self):
self.assertEqual(self.measure.max_feature_size(5, 1.0), 5)
self.assertEqual(self.measure.max_feature_size(5, 0.5), 20)
def test_minimum_common_feature_count(self):
self.assertEqual(self.measure.minimum_common_feature_count(5, 5, 1.0), 5)
self.assertEqual(self.measure.minimum_common_feature_count(5, 20, 1.0), 10)
self.assertEqual(self.measure.minimum_common_feature_count(5, 5, 0.5), 3)
def test_similarity(self):
x = ["a", "ab", "bc", "c"]
y = ["a", "ab", "bc", "cd", "e"]
self.assertEqual(round(self.measure.similarity(x, x), 2), 1.0)
self.assertEqual(round(self.measure.similarity(x, y), 2), 0.67)
z = ["a", "ab", "ba", "ab", "a"]
self.assertEqual(round(self.measure.similarity(z, z), 2), 1.0)
self.assertEqual(round(self.measure.similarity(x, z), 2), 0.58)
self.assertEqual(round(self.measure.similarity(x, y), 2), 0.67)
# Test as per paper trigrams with quotes of methyl sulphone and methyl sulfone
a = [' "m', '"me', 'met', 'eth', 'thy', 'hyl', 'yl ', 'l s', ' su', 'sul', 'ulf', 'lfo', 'fon', 'one', 'ne"', 'e" ']
b = [' "m', '"me', 'met', 'eth', 'thy', 'hyl', 'yl ', 'l s', ' su', 'sul', 'ulp', 'lph', 'pho', 'hon', 'one', 'ne"', 'e" ']
        self.assertEqual(round(self.measure.similarity(a, b), 3), 0.788)  # Matches the 0.788 value reported in the paper
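# A hedged reference sketch of the measure under test, assuming simstring's
# cosine works over unique features: |X & Y| / sqrt(|X| * |Y|). It reproduces
# the values asserted above (0.67, 0.58 and 0.788).
def _reference_cosine(x, y):
    sx, sy = set(x), set(y)
    return len(sx & sy) / (len(sx) * len(sy)) ** 0.5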
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 15:28:07 2020
@author: ESOL
"""
# Import module
import jpype
# Enable Java imports
import jpype.imports
# Pull in types
from jpype.types import *
jpype.addClassPath('C:/Users/esol/OneDrive - Equinor/programming/neqsim/NeqSim.jar')
# Launch the JVM; jpype.imports needs a running JVM before Java packages
# like neqsim can be imported, and addClassPath must come first.
jpype.startJVM()
import neqsim
import neqsim.thermo as thermo
fluid1 = thermo.system.SystemSrkEos(303.15, 35.01325)
fluid1.addComponent("nitrogen", 0.0028941);
fluid1.addComponent("CO2", 0.054069291);
fluid1.addComponent("methane", 0.730570915);
fluid1.addComponent("ethane", 0.109004002);
fluid1.addComponent("propane", 0.061518891);
fluid1.addComponent("n-butane", 0.0164998);
fluid1.addComponent("i-butane", 0.006585);
fluid1.addComponent("n-pentane", 0.005953);
fluid1.addComponent("i-pentane", 0.0040184);
fluid1.addTBPfraction("C6", 0.6178399, 86.17801 / 1000.0, 0.6639999);
fluid1.addComponent("water", 0.27082);
fluid1.createDatabase(True);
fluid1.setMixingRule(2);
fluid1.setMultiPhaseCheck(True);
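# A hedged usage sketch: run a TP flash on the fluid. Assumes this NeqSim.jar
# exposes neqsim.thermodynamicOperations.ThermodynamicOperations (the package
# name varies between NeqSim versions).
ops = neqsim.thermodynamicOperations.ThermodynamicOperations(fluid1)
ops.TPflash()
print(fluid1.getNumberOfPhases())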
|
nilq/baby-python
|
python
|
from fontbakery.callable import check
from fontbakery.callable import condition
from fontbakery.checkrunner import Section, PASS, FAIL, WARN
from fontbakery.fonts_profile import profile_factory
from tests.test_general import (
is_italic,
com_roboto_fonts_check_italic_angle,
com_roboto_fonts_check_fs_type,
com_roboto_fonts_check_vendorid,
com_roboto_fonts_check_digit_widths,
com_roboto_fonts_check_charset_coverage,
)
profile = profile_factory(default_section=Section("Roboto android v3"))
exclude_glyphs = frozenset([0x00A0])
ROBOTO_PROFILE_CHECKS = [
"com.roboto.fonts/check/vertical_metrics",
"com.roboto.fonts/check/italic_angle",
"com.roboto.fonts/check/fs_type",
"com.roboto.fonts/check/vendorid",
"com.roboto.fonts/check/digit_widths",
"com.roboto.fonts/check/glyph_dont_round_to_grid",
"com.roboto.fonts/check/charset_coverage",
]
@condition
def include_glyphs():
return frozenset([
0x2117, # SOUND RECORDING COPYRIGHT
0xEE01, 0xEE02, 0xF6C3]
) # legacy PUA
@condition
def exclude_glyphs():
return frozenset([
0x20E3, # COMBINING ENCLOSING KEYCAP
0x2191, # UPWARDS ARROW
0x2193, # DOWNWARDS ARROW
0x2072, 0x2073, 0x208F] + # unassigned characters
list(range(0xE000, 0xF8FF + 1)) + list(range(0xF0000, 0x10FFFF + 1)) # other PUA
) - include_glyphs() # don't exclude legacy PUA
@check(
id="com.roboto.fonts/check/glyph_dont_round_to_grid",
)
def com_roboto_fonts_check_glyph_dont_round_to_grid(ttFont):
"""Test certain glyphs don't round to grid"""
failed = False
glyphset = ttFont.getGlyphSet()
for name in ["ellipsis"]:
glyph = glyphset[name]._glyph
for component in glyph.components:
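            # Bit 2 of a composite component's flags is ROUND_XY_TO_GRID
            # in the OpenType glyf table specification.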
if component.flags & (1 << 2):
failed = True
yield FAIL, f"Round to grid flag must be disabled for '{name}' components"
if not failed:
yield PASS, "Glyphs do not have round to grid enabled"
# test names
@check(
id="com.roboto.fonts/check/vertical_metrics",
)
def com_roboto_fonts_check_vertical_metrics(ttFont):
"""Check vertical metrics are correct"""
failed = []
expected = {
# Android values come from v2.136 android fonts
# https://github.com/googlefonts/roboto/releases/tag/v2.136
("head", "yMin"): -555,
("head", "yMax"): 2163,
("hhea", "descent"): -500,
("hhea", "ascent"): 1900,
("hhea", "lineGap"): 0,
("OS/2", "sTypoDescender"): -555,
("OS/2", "sTypoAscender"): 2146,
("OS/2", "sTypoLineGap"): 0,
("OS/2", "usWinDescent"): 555,
("OS/2", "usWinAscent"): 2146,
}
for (table, k), v in expected.items():
font_val = getattr(ttFont[table], k)
if font_val != v:
failed.append((table, k, v, font_val))
if not failed:
yield PASS, "Fonts have correct vertical metrics"
else:
msg = "\n".join(
[
f"- {tbl}.{k} is {font_val} it should be {v}"
for tbl, k, v, font_val in failed
]
)
yield FAIL, f"Fonts have incorrect vertical metrics:\n{msg}"
# ligatures
profile.auto_register(globals())
profile.test_expected_checks(ROBOTO_PROFILE_CHECKS, exclusive=True)
|
nilq/baby-python
|
python
|
import socket as sk
import sys
import threading
from PyQt4.QtCore import *
MAX_THREADS = 50
#def usage():
#print("\npyScan 0.1")
#print("usage: pyScan <host> [start port] [end port]")
class Scanner(threading.Thread):
def __init__(self, host, port):
threading.Thread.__init__(self)
# host and port
self.host = host
self.port = port
# build up the socket obj
self.sd = sk.socket(sk.AF_INET, sk.SOCK_STREAM)
def run(self):
try:
# connect to the given host:port
self.sd.connect((self.host, self.port))
print("%s:%d OPEN" % (self.host, self.port))
#self.emit('SIGNAL(QString),OPEN')
self.sd.close()
except: pass
class pyScan:
def __init__(self, args=[]):
# arguments vector
self.args = args
# start port and end port
self.start, self.stop = 1, 1024
# host name
self.host = ""
# check the arguments
if len(self.args) == 4:
self.host = self.args[1]
try:
self.start = int(self.args[2])
self.stop = int(self.args[3])
except ValueError:
#usage()
return
if self.start > self.stop:
#usage()
return
elif len(self.args) == 2:
self.host = self.args[1]
else:
#usage()
return
        try:
            sk.gethostbyname(self.host)
        except:
            print("hostname '%s' unknown" % self.host)
            return
        self.scan(self.host, self.start, self.stop)
    def scan(self, host, start, stop):
        self.port = start
        while self.port <= stop:
            # throttle: only spawn a scanner when under the thread cap, and
            # advance the port only when a scanner was actually started
            if threading.activeCount() < MAX_THREADS:
                Scanner(host, self.port).start()
                self.port += 1
if __name__ == "__main__":
pyScan(sys.argv)
'''
#############################################################
# a simple portscanner with multithreading
# QUEUE BASED VERSION
import socket
import sys
import threading, Queue
MAX_THREADS = 50
class Scanner(threading.Thread):
def __init__(self, inq, outq):
threading.Thread.__init__(self)
self.setDaemon(1)
# queues for (host, port)
self.inq = inq
self.outq = outq
def run(self):
while 1:
host, port = self.inq.get()
sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to the given host:port
sd.connect((host, port))
except socket.error:
# set the CLOSED flag
self.outq.put((host, port, 'CLOSED'))
else:
self.outq.put((host, port, 'OPEN'))
sd.close()
def scan(host, start, stop, nthreads=MAX_THREADS):
toscan = Queue.Queue()
scanned = Queue.Queue()
scanners = [Scanner(toscan, scanned) for i in range(nthreads)]
for scanner in scanners:
scanner.start()
hostports = [(host, port) for port in xrange(start, stop+1)]
for hostport in hostports:
toscan.put(hostport)
results = {}
for host, port in hostports:
while (host, port) not in results:
nhost, nport, nstatus = scanned.get()
results[(nhost, nport)] = nstatus
status = results[(host, port)]
if status <> 'CLOSED':
print '%s:%d %s' % (host, port, status)
if __name__ == '__main__':
scan('localhost', 0, 1024)
'''
None
|
nilq/baby-python
|
python
|
"""
Tyson Reimer
October 08th, 2020
"""
import os
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import norm
from umbms import get_proj_path, get_script_logger
from umbms.loadsave import load_pickle
###############################################################################
__DATA_DIR = os.path.join(get_proj_path(), 'output/by-adi-preds/')
phant_info_dir = os.path.join(get_proj_path(), 'data/phant-info/')
###############################################################################
if __name__ == "__main__":
logger = get_script_logger(__file__)
# Load metadata lists of correct and incorrect predictions
cor_preds = load_pickle(os.path.join(__DATA_DIR, 'byadi_cor_preds.pickle'))
incor_preds = load_pickle(os.path.join(__DATA_DIR,
'byadi_incor_preds.pickle'))
# Define list of metadata dicts for all predictions
all_preds = cor_preds + incor_preds
# Define array indicating correct vs incorrect prediction
pred_labels = np.zeros([len(all_preds), ])
pred_labels[:len(cor_preds)] = 1
# Load phantom info
phant_info = np.genfromtxt(os.path.join(phant_info_dir, 'phant_info.csv'),
delimiter=',',
dtype=['<U20', '<U20', float, float, float])
# All phantom IDs
phant_ids = np.array(['%s%s' % (ii[0], ii[1]) for ii in phant_info])
# Init dicts for phantom density and breast volume
phant_densities = dict()
phant_vols = dict()
for ii in range(len(phant_ids)):
# Store the fibroglandular % by volume
phant_densities[phant_ids[ii]] = 100 * phant_info[ii][2]
# Store the adipose volume in cubic cm
phant_vols[phant_ids[ii]] = phant_info[ii][3] / (10 * 10 * 10)
tum_presence = np.array([~np.isnan(md['tum_rad']) for md in all_preds])
tum_preds = np.array(all_preds)[tum_presence]
tum_labels = pred_labels[tum_presence]
healthy_preds = np.array(all_preds)[~tum_presence]
healthy_labels = pred_labels[~tum_presence]
###########################################################################
logger.info('TUMOUR PREDICTIONS')
# Init metadata dataframe
md_df = pd.DataFrame()
# Get the fibroglandular polar radii
fib_polar_rad = np.array([np.sqrt((md['fib_x'] - md['adi_x']) ** 2
+ (md['fib_y'] - md['adi_y']) ** 2)
for md in tum_preds])
md_df['fib_polar_rad'] = fib_polar_rad
# Get the adipose polar radii
adi_polar_rad = np.array([np.sqrt(md['adi_x'] ** 2 + md['adi_y'] ** 2)
for md in tum_preds])
md_df['adi_polar_rad'] = adi_polar_rad
# Get breast density in % by volume from each scan,
# include in dataframe
density = np.array([phant_densities[md['phant_id']] for md in tum_preds])
md_df['density'] = density
# Get Adipose ID from each scan, include in dataframe
adi_vols = np.array([phant_vols[md['phant_id']] for md in tum_preds])
md_df['adi_vol'] = adi_vols
# Get the tumor radii from each scan, include in dataframe
tum_rads = np.array([md['tum_rad'] for md in tum_preds])
tum_rads[np.isnan(tum_rads)] = 0
md_df['tum_rad'] = tum_rads
# Get tumor polar radii from each scan, include in dataframe
tum_polar_rad = np.array([np.sqrt((md['tum_x'] - md['adi_x']) ** 2
+ (md['tum_y'] - md['adi_y']) ** 2)
for md in tum_preds])
tum_polar_rad[np.isnan(tum_polar_rad)] = 0
md_df['tum_polar_rad'] = tum_polar_rad
# Include tumour z-position in metadata
tum_zs = np.array([md['tum_z'] for md in tum_preds])
tum_zs[np.isnan(tum_zs)] = 0
tum_zs = np.abs(tum_zs)
tum_zs = np.max(tum_zs) - tum_zs
# Convert so that it is the distance from the antenna z-plane
md_df['tum_z'] = tum_zs
tum_in_fib = np.array([(md['tum_in_fib']) for md in tum_preds])
md_df['tum_in_fib'] = tum_in_fib
# Store prediction score in dataframe
md_df['pred_score'] = tum_labels
# Create logistic regression model
model = sm.GLM.from_formula("pred_score ~ "
" adi_vol "
" + density"
" + fib_polar_rad"
" + adi_polar_rad"
" + tum_rad"
" + tum_polar_rad"
" + tum_z"
" + C(tum_in_fib)"
,
family=sm.families.Binomial(),
data=md_df)
results = model.fit()
# Report results
logger.info(results.summary2())
logger.info('\tp-values:')
logger.info('\t\t%s' % results.pvalues)
    # Critical value for a two-sided 95% confidence interval
    zstar = norm.ppf(0.975)
# Report odds ratio and significance level results
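    # For a logistic model, OR = exp(beta) and its CI is exp(beta +/- zstar*SE).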
for ii in results.params.keys():
logger.info('\t%s' % ii) # Print metadata info
coeff = results.params[ii]
std_err = results.bse[ii]
odds_ratio = np.exp(coeff) # Get odds ratio
# Get 95% C.I. for odds ratio
or_low = np.exp(coeff - zstar * std_err)
or_high = np.exp(coeff + zstar * std_err)
# Get p-val
pval = results.pvalues[ii]
logger.info('\t\tOdds ratio:\t\t\t%.3e\t(%.3e,\t%.3e)'
% (odds_ratio, or_low, or_high))
logger.info('\t\tp-value:\t\t\t%.3e' % pval)
###########################################################################
print('\n' * 5)
logger.info('HEALTHY PREDICTIONS')
# Init metadata dataframe
md_df = pd.DataFrame()
# Get the fibroglandular polar radii
fib_polar_rad = np.array([np.sqrt((md['fib_x'] - md['adi_x']) ** 2
+ (md['fib_y'] - md['adi_y']) ** 2)
for md in healthy_preds])
md_df['fib_polar_rad'] = fib_polar_rad
# Get the adipose polar radii
adi_polar_rad = np.array([np.sqrt(md['adi_x'] ** 2 + md['adi_y'] ** 2)
for md in healthy_preds])
md_df['adi_polar_rad'] = adi_polar_rad
# Get breast density in % by volume from each scan,
# include in dataframe
density = np.array([phant_densities[md['phant_id']]
for md in healthy_preds])
md_df['density'] = density
# Get Adipose ID from each scan, include in dataframe
adi_vols = np.array([phant_vols[md['phant_id']] for md in healthy_preds])
md_df['adi_vol'] = adi_vols
# Store prediction score in dataframe
md_df['pred_score'] = healthy_labels
# Create logistic regression model
model = sm.GLM.from_formula("pred_score ~ "
" adi_vol "
" + density"
" + fib_polar_rad"
" + adi_polar_rad"
,
family=sm.families.Binomial(),
data=md_df)
results = model.fit()
# Report results
logger.info(results.summary2())
logger.info('\tp-values:')
logger.info('\t\t%s' % results.pvalues)
    # Critical value for a two-sided 95% confidence interval
    zstar = norm.ppf(0.975)
# Report odds ratio and significance level results
for ii in results.params.keys():
logger.info('\t%s' % ii) # Print metadata info
coeff = results.params[ii]
std_err = results.bse[ii]
odds_ratio = np.exp(coeff) # Get odds ratio
# Get 95% C.I. for odds ratio
or_low = np.exp(coeff - zstar * std_err)
or_high = np.exp(coeff + zstar * std_err)
# Get p-val
pval = results.pvalues[ii]
logger.info('\t\tOdds ratio:\t\t\t%.3e\t(%.3e,\t%.3e)'
% (odds_ratio, or_low, or_high))
logger.info('\t\tp-value:\t\t\t%.3e' % pval)
|
nilq/baby-python
|
python
|
#encoding=utf-8
import sys
'''
SocialMiner
https://github.com/paulyang0125/SocialMiner
Copyright (c) 2015 Yao-Nien, Yang
Licensed under the MIT license.
'''
import re
from optparse import OptionParser
import nltk
#from nltk import *
import nltk.cluster
import nltk.cluster.kmeans
import nltk.cluster.gaac
import numpy
from nltk.corpus import movie_reviews
from nltk.corpus import wordnet
#from nltk_contrib.wordnet import *
import pickle
import time
import logging
### TODO
### 1. how to decide which used_classifier should be used - Naive, SVM ???
logger = logging.getLogger('myapp')
#logger.setLevel(logging.DEBUG)
logger.info('classification.py started')
def stripLabels(testFeatures):
"""
Strips label from a test sentence feature vector
"""
return [testFeatures[i][0] for i in range(len(testFeatures))]
def selectTrainingTestFeatures(featureVectors, cvstart, cvlength, sentences):
"""
Selects training and test feature subsets.
Training set is the contingent sublist from location cvstart to cvlength
"""
testmappingList = []
trainmappingList = []
test = []
train = []
#test = [featureVectors[i] for i in range(len(featureVectors)) if cvstart <= i < cvstart + cvlength]
myindex1 = 0
myindex2 = 0
for i in range(len(featureVectors)):
mappingdic = {}
if cvstart <= i < cvstart + cvlength:
test.append(featureVectors[i])
mappingdic["before_ID"] = i
#mappingdic["after_ID"] = test.index(featureVectors[i]) # index only return the first element that matches
mappingdic["after_ID"] = myindex1
testmappingList.append(mappingdic)
myindex1 += 1
#train = [featureVectors[i] for i in range(len(featureVectors)) if i < cvstart or cvstart + cvlength <= i]
for i in range(len(featureVectors)):
mappingdic = {}
if i < cvstart or cvstart + cvlength <= i:
train.append(featureVectors[i])
mappingdic["before_ID"] = i
#mappingdic["after_ID"] = train.index(featureVectors[i])
mappingdic["after_ID"] = myindex2
trainmappingList.append(mappingdic)
myindex2 += 1
testSents = [sentences[i] for i in range(len(featureVectors)) if cvstart <= i < cvstart + cvlength]
assert len(featureVectors) == len(test) + len(train)
assert len(testSents) == len(test)
logger.debug("testmappingList:")
print "testmappingList:"
    logger.debug(testmappingList)
print testmappingList
time.sleep(0.1)
#print "trainmappingList:"
#print trainmappingList
return train, test, testSents, testmappingList
def selectPredictionTestFeatures(featureVectors, sentences):
testmappingList = []
test = []
#test = [featureVectors[i] for i in range(len(featureVectors))]
myindex = 0
for i in range(len(featureVectors)):
mappingdic = {}
test.append(featureVectors[i])
mappingdic["before_ID"] = i
print("exam the feature vector:")
print (featureVectors[i])
logger.debug("exam the feature vector:")
logger.debug(featureVectors[i])
mappingdic["after_ID"] = myindex
testmappingList.append(mappingdic)
myindex += 1
testSents = [sentences[i] for i in range(len(featureVectors))]
#print "testmappingList:"
logger.debug("testmappingList:")
#print testmappingList
logger.debug(testmappingList)
time.sleep(0.1)
return test, testSents, testmappingList
def classify_prediction(testFeatures, testSentences, messages, opt, used_classifier, testmappingList):
#predictedLabelsDic = {}
testFeaturesD = stripLabels(testFeatures)
assert (testFeatures != None)
classifier = used_classifier
predictedLabels = classifier.batch_classify(testFeaturesD)
print "start to assign the prediction tag into sentence obj"
logger.info("start to assign the prediction tag into sentence obj")
for msgObj in messages:
for senObj in msgObj.sentences:
for id, label in enumerate(predictedLabels):
for test in testmappingList:
if test["after_ID"] == id and senObj.vector_id == test["before_ID"]:
if label == "Neutr": senObj.predict_opinion = 0
elif label == "Neg": senObj.predict_opinion = -1
elif label == "Pos": senObj.predict_opinion = 1
else:
print "no tag, error!!"
logger.error("no tag, error!!")
#for id, labels in enumerate(predictedLabels)
#vectorIDAssign = lambda n: 'http://www.ptt.cc/bbs/' + board_name + '/index' + str(n) + '.html'
## assign result to sentenceObj
assert (len(predictedLabels) == len(testSentences))
stats_total = len(predictedLabels)
return (stats_total, predictedLabels)
def classify_training(trainingFeatures, testFeatures, testSentences, messages, opt, testmappingList):
"""
Classifies the feature vectos.
"""
assert (trainingFeatures != None and testFeatures != None)
classifier = None;
if (opt['cl_naive_bayes']):
if opt['verbose']: print "Running NaiveBayes classifier"
classifier = nltk.NaiveBayesClassifier.train(trainingFeatures)
print "init accuracy for Naive:"
logger.info("init accuracy for Naive:")
print nltk.classify.accuracy(classifier, testFeatures)
logger.info(nltk.classify.accuracy(classifier, testFeatures))
#### TODO #####
elif opt['cl_max_entropy'] != None:
if opt['verbose']:
logger.info("Running maximum entropy classifier")
print "Running maximum entropy classifier"
if opt['cl_max_entropy'] == "default": algorithm = None
else: algorithm = opt['cl_max_entropy']
traceL=0;
if opt['verbose']: traceL=3;
elif opt['verbose_all']: traceL=5;
classifier = nltk.MaxentClassifier.train(trainingFeatures, algorithm, traceL, max_iter=7)
elif opt['cl_decision_tree']:
if opt['verbose']:
logger.info("Running decision tree classifier")
print "Running decision tree classifier"
classifier = nltk.DecisionTreeClassifier.train(trainingFeatures, entropy_cutoff=0.5, depth_cutoff=70, support_cutoff=10)
if classifier == None:
print "No classifier selected! Aborting!"
logger.error("No classifier selected! Aborting!")
exit(1)
testFeaturesD = stripLabels(testFeatures)
predictedLabels = classifier.batch_classify(testFeaturesD)
    ## assign predicted labels back onto the sentence objects
print "start to assign the prediction tag into sentence obj"
logger.info("start to assign the prediction tag into sentence obj")
for msgObj in messages:
for senObj in msgObj.sentences:
for id, label in enumerate(predictedLabels):
for test in testmappingList:
if test["after_ID"] == id and senObj.vector_id == test["before_ID"]:
if label == "Neutr": senObj.predict_opinion = 0
elif label == "Neg": senObj.predict_opinion = -1
elif label == "Pos": senObj.predict_opinion = 1
else:
print "no tag, error!!"
logger.error("no tag, error!!")
assert (len(predictedLabels) == len(testSentences))
stats_total = 0
stats_correct = 0
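    # map(None, a, b) is the Python 2 idiom for zip() that pads the shorter
    # sequence with None (like itertools.izip_longest)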
for origFV, newLabel in map(None, testFeatures, predictedLabels):
origLabel = origFV[1]
stats_total = stats_total + 1
if origLabel == newLabel: stats_correct = stats_correct + 1
if opt['verbose']:
for l in classifier.labels():
print "'%s'\t" % l,
logger.info("'%s'\t" % l,)
print "L_orig\tL_new"
logger.info("L_orig\tL_new")
trainingFeaturesD = stripLabels(trainingFeatures)
predLabs2 = classifier.batch_classify(trainingFeaturesD)
probcfs = None
try:
probcfs = classifier.batch_prob_classify(trainingFeaturesD)
except Exception:
probcfs = ["-" for t in trainingFeaturesD]
for pdist, origFV, newLabel in map(None, probcfs, trainingFeatures, predLabs2):
origLabel = origFV[1]
for l in classifier.labels():
if pdist != "-":
print "%.3f\t" % pdist.prob(l),
logger.info("%.3f\t" % pdist.prob(l),)
else:
print "- \t",
logger.info("- \t",)
print " %s\t%s" % (origLabel, newLabel),
logger.info(" %s\t%s" % (origLabel, newLabel),)
print ""
logger.info("")
##### start to use testset with the text showed
probcfs = None
try:
probcfs = classifier.batch_prob_classify(testFeaturesD)
except Exception:
probcfs = ["-" for t in testFeaturesD]
for pdist, origFV, newLabel, sent in map(None, probcfs, testFeatures, predictedLabels, testSentences):
origLabel = origFV[1]
for l in classifier.labels():
if pdist != "-":
print "%.3f\t" % pdist.prob(l),
logger.info("%.3f\t" % pdist.prob(l),)
else:
print "- \t",
logger.info("- \t",)
print " %s\t%s" % (origLabel, newLabel),
logger.info(" %s\t%s" % (origLabel, newLabel),)
if opt['verbose_all']:
print "\t%s" % sent.text
logger.debug("\t%s" % sent.text)
else:
print ""
logger.info("")
stats_perc = 100.0 * stats_correct / stats_total
f_measure = evaluateClassificationBCubed([f[1] for f in testFeatures], predictedLabels, opt)
if opt['verbose']:
if not (opt['cl_naive_bayes'] or not opt['cl_max_entropy']):
classifier.show_most_informative_features()
return (stats_correct, stats_total, stats_perc, f_measure, classifier, predictedLabels)
def evaluateClassificationBCubed(originalLabels, newLabels, opt):
label1 = None; label2 = None
A = 0; B = 0; C = 0; D = 0;
labelPair = map(None, originalLabels, newLabels)
precision = 0.0
recall = 0.0
for (e1o, e1n) in labelPair:
sameNew = [ (e2o, e2n) for e2o, e2n in labelPair if e1n == e2n ] ## same cluster
sameOld = [ (e2o, e2n) for e2o, e2n in labelPair if e1o == e2o ] ## same category
sameBoth = [(e2o, e2n) for e2o, e2n in labelPair if e1o == e2o and e1n == e2n] ## same cluster and category
precision = precision + 1.0/len(sameNew) * len(sameBoth)
recall = recall + 1.0/len(sameOld) * len(sameBoth)
precision = precision / len(originalLabels)
recall = recall / len(originalLabels)
print precision, recall
    logger.info("precision=%s recall=%s", precision, recall)
Fmeasure = 2 * precision * recall / ( precision + recall )
return Fmeasure
def processClassification(mode, featureVectors, allSentences, messages, options, used_classifier = None):
if options['training']:
print "training mode for Classification!"
logger.info("training mode for Classification!")
##featureVectors for training : [({'f1':'','f2':''}, 'Subj'), (), () ]
crossvalidate = int(1 + 0.01 * len(featureVectors) * float(options['o_crossvalidate']))
crosslen = int(0.01 * float(options['o_testpercentage']) * len(featureVectors) + 1)
useCrossvalidation = options['o_crossvalidate'] != -1
cvstart = 0
if not useCrossvalidation:
cvstart = len(featureVectors) - crosslen
crossvalidate = crosslen
results = []
i = 0
while cvstart < len(featureVectors):
## divide features in training and test set
featureTraining, featureTest, testSentences, testmappingList = selectTrainingTestFeatures(featureVectors, cvstart, crosslen, allSentences)
assert len(featureTraining) > 0 , "There must exist some training features"
assert len(featureTest) > 0 , "There must exist some test features"
## perform classification
## res = tuple - (stats_correct, stats_total, stats_perc, f_measure, classifier)
res = classify_training(featureTraining, featureTest, testSentences, messages, options, testmappingList)
used_classifier = res[4] ## this is classifier, gonna save
results.append(res)
print "Run %d. Correct: %d / %d (%5.3f %%) [F = %5.3f] "%(i, res[0], res[1], res[2], res[3])
logger.info("Run %d. Correct: %d / %d (%5.3f %%) [F = %5.3f] "%(i, res[0], res[1], res[2], res[3]))
cvstart = cvstart + crossvalidate
i = i + 1
return evaluateResults(results, used_classifier)
else:
print "prediction mode for Classification!"
logger.info("prediction mode for Classification!")
##featureVectors for predict : [({'f1':'','f2':''}, 'na'), (), () ]
featureTest, testSentences, testmappingList = selectPredictionTestFeatures(featureVectors, allSentences)
assert len(featureTest) > 0 , "There must exist some test features"
res = classify_prediction(featureTest, testSentences, messages, options, used_classifier, testmappingList)
stat_all = res[0]; predict_results = res[1]
return stat_all , predict_results
def evaluateResults(results, used_classifier):
avg_correct = 0; avg_all = 0; avg_perc = 0; avg_f = 0
classifiersList = []
for r in results:
avg_correct = avg_correct + r[0]
avg_all = avg_all + r[1]
avg_f = avg_f + r[3]
classifiersList.append(r[4])
avg_perc = 100.0 * avg_correct / avg_all
total_runs = len(results)
avg_correct = avg_correct / total_runs
avg_f = avg_f / total_runs
avg_all = avg_all / total_runs
#saveClassifier(classifiersList)
print "RESULTS after %d runs" % total_runs
logger.info("RESULTS after %d runs" % total_runs)
print "Correct: %d / %d (%5.3f %%) [F = %5.3f]" % (avg_correct, avg_all, avg_perc, avg_f)
logger.info("Correct: %d / %d (%5.3f %%) [F = %5.3f]" % (avg_correct, avg_all, avg_perc, avg_f))
# output of process(.)
return (avg_correct, avg_all, avg_perc, avg_f, used_classifier), used_classifier
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow creation of uart/console interface via usb google serial endpoint."""
import argparse
import array
import exceptions
import os
import sys
import termios
import threading
import time
import traceback
import tty
try:
import usb
except:
print "import usb failed"
print "try running these commands:"
print " sudo apt-get install python-pip"
print " sudo pip install --pre pyusb"
print ""
sys.exit(-1)
"""Class Susb covers USB device discovery and initialization.
It can find a particular endpoint by vid:pid, serial number,
and interface number.
"""
class SusbError(Exception):
"""Class for exceptions of Susb."""
def __init__(self, msg, value=0):
"""SusbError constructor.
Args:
msg: string, message describing error in detail
value: integer, value of error when non-zero status returned. Default=0
"""
super(SusbError, self).__init__(msg, value)
self.msg = msg
self.value = value
class Susb():
"""Provide USB functionality.
Instance Variables:
_read_ep: pyUSB read endpoint for this interface
_write_ep: pyUSB write endpoint for this interface
"""
READ_ENDPOINT = 0x81
WRITE_ENDPOINT = 0x1
TIMEOUT_MS = 100
def __init__(self, vendor=0x18d1,
product=0x500f, interface=1, serialname=None):
"""Susb constructor.
Discovers and connects to USB endpoints.
Args:
vendor : usb vendor id of device
product : usb product id of device
interface : interface number ( 1 - 8 ) of device to use
serialname: string of device serialnumber.
Raises:
SusbError: An error accessing Susb object
"""
# Find the device.
dev_list = usb.core.find(idVendor=vendor, idProduct=product, find_all=True)
if dev_list is None:
raise SusbError("USB device not found")
# Check if we have multiple devices.
dev = None
if serialname:
for d in dev_list:
dev_serial = "PyUSB doesn't have a stable interface"
try:
dev_serial = usb.util.get_string(d, 256, d.iSerialNumber)
except:
dev_serial = usb.util.get_string(d, d.iSerialNumber)
if dev_serial == serialname:
dev = d
break
if dev is None:
raise SusbError("USB device(%s) not found" % serialname)
else:
try:
dev = dev_list[0]
except:
try:
dev = dev_list.next()
except:
raise SusbError("USB device %04x:%04x not found" % (vendor, product))
# If we can't set configuration, it's already been set.
try:
dev.set_configuration()
except usb.core.USBError:
pass
# Get an endpoint instance.
cfg = dev.get_active_configuration()
intf = usb.util.find_descriptor(cfg, bInterfaceNumber=interface)
self._intf = intf
if not intf:
raise SusbError("Interface not found")
# Detach raiden.ko if it is loaded.
if dev.is_kernel_driver_active(intf.bInterfaceNumber) is True:
dev.detach_kernel_driver(intf.bInterfaceNumber)
read_ep_number = intf.bInterfaceNumber + self.READ_ENDPOINT
read_ep = usb.util.find_descriptor(intf, bEndpointAddress=read_ep_number)
self._read_ep = read_ep
write_ep_number = intf.bInterfaceNumber + self.WRITE_ENDPOINT
write_ep = usb.util.find_descriptor(intf, bEndpointAddress=write_ep_number)
self._write_ep = write_ep
"""Suart class implements a stream interface, to access Google's USB class.
This creates a send and receive thread that monitors USB and console input
and forwards them across. This particular class is hardcoded to stdin/out.
"""
class SuartError(Exception):
"""Class for exceptions of Suart."""
def __init__(self, msg, value=0):
"""SuartError constructor.
Args:
msg: string, message describing error in detail
value: integer, value of error when non-zero status returned. Default=0
"""
super(SuartError, self).__init__(msg, value)
self.msg = msg
self.value = value
class Suart():
"""Provide interface to serial usb endpoint."""
def __init__(self, vendor=0x18d1, product=0x501c, interface=0,
serialname=None):
"""Suart contstructor.
Initializes USB stream interface.
Args:
vendor: usb vendor id of device
product: usb product id of device
interface: interface number of device to use
serialname: Defaults to None.
Raises:
SuartError: If init fails
"""
self._susb = Susb(vendor=vendor, product=product,
interface=interface, serialname=serialname)
self._exit = False
def exit(self):
self._exit = True
def running(self):
return (not self._exit)
def __del__(self):
"""Suart destructor."""
self.exit()
def run_rx_thread(self):
while self.running():
try:
r = self._susb._read_ep.read(64, self._susb.TIMEOUT_MS)
if r:
sys.stdout.write(r.tostring())
sys.stdout.flush()
except Exception as e:
# If we miss some characters on pty disconnect, that's fine.
# ep.read() also throws USBError on timeout, which we discard.
if type(e) not in [exceptions.OSError, usb.core.USBError]:
print "rx %s" % e
def run_tx_thread(self):
while self.running():
try:
r = sys.stdin.read(1)
if r == '\x03':
self.exit()
if r:
self._susb._write_ep.write(array.array('B', r), self._susb.TIMEOUT_MS)
except Exception as e:
print "tx %s" % e
def run(self):
"""Creates pthreads to poll USB & PTY for data.
"""
self._exit = False
self._rx_thread = threading.Thread(target=self.run_rx_thread, args=[])
self._rx_thread.daemon = True
self._rx_thread.start()
self._tx_thread = threading.Thread(target=self.run_tx_thread, args=[])
self._tx_thread.daemon = True
self._tx_thread.start()
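    # Both threads are daemons: they stop when the main thread exits, so
    # callers only need to poll running().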
"""Terminal settings cleanup."""
def force_exit():
global old_settings
global fd
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
os.system("stty echo")
sys.exit(0)
"""Command line functionality
Allows specifying vid:pid, serialnumber, interface.
Ctrl-C exits.
"""
parser = argparse.ArgumentParser(description="Open a console to a USB device")
parser.add_argument('-d', '--device', type=str,
help="vid:pid of target device", default="18d1:501c")
parser.add_argument('-i', '--interface', type=int,
help="interface number of console", default=0)
parser.add_argument('-s', '--serialno', type=str,
help="serial number of device", default="")
def main():
args = parser.parse_args()
vidstr, pidstr = args.device.split(':')
vid = int(vidstr, 16)
pid = int(pidstr, 16)
serialno = args.serialno
interface = args.interface
sobj = Suart(vendor=vid, product=pid, interface=interface,
serialname=serialno)
try:
tty.setraw(sys.stdin.fileno())
except:
pass
sobj.run()
# run() is a thread so just busy wait to mimic server
while sobj.running():
time.sleep(.1)
if __name__ == '__main__':
global old_settings
global fd
try:
os.system("stty -echo")
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
except:
pass
try:
main()
  except KeyboardInterrupt:
    # sobj is local to main(); just fall through so the finally block
    # restores the terminal. The rx/tx threads are daemons and will exit.
    pass
except Exception as e:
try:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
os.system("stty echo")
finally:
traceback.print_exc()
finally:
force_exit()
|
nilq/baby-python
|
python
|
import numpy as np
# general convolve framework
def convframe(input, weight, output=None, init=0,
mode='reflect', buffertype=None, keeptype=True, func=None):
if output is None:
output = np.zeros(input.shape, buffertype or input.dtype)
output[:] = input if init is None else init
buf = np.zeros_like(output)
    coreshp = weight.shape
    coremar = np.array(weight.shape) // 2
padimg = np.pad(input, [(i,i) for i in coremar], mode=mode)
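    # rcs enumerates every offset inside the kernel window, one row per kernel
    # element; the loop below shifts the padded image by each offset.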
rcs = np.mgrid[tuple([slice(i) for i in coreshp])]
rcs = rcs.reshape(input.ndim, -1).T
for idx, w in zip(rcs, weight.ravel()):
start, end = idx, idx + input.shape
s = [slice(i,j) for i,j in zip(start, end)]
buf[:] = padimg[tuple(s)]
func(buf, output, w)
return output.astype(input.dtype) if keeptype else output
# split convolve in axis
def axisframe(img, core, mode='reflect', f=None):
dtype = img.dtype
for i in range(len(core)):
shape = np.ones(img.ndim, dtype=np.int8)
shape[i] = -1
if core[i].size == 1:
img = img * core[i]
continue
c = core[i].reshape(shape)
img = f(img, c, output=None, mode=mode, keeptype=False)
return img.astype(dtype)
def convolve(input, weight, output=None, mode='reflect', keeptype=True):
def f(buf, output, w): buf *= w; output += buf
return convframe(input, weight, output, 0, mode, 'float32', keeptype, f)
def uniform_filter(img, size=3, mode='reflect'):
if not hasattr(size, '__len__'): size = [size] * img.ndim
def core(s):
if s<=1: return np.array([1])
return np.ones(s).astype('float32')/s
cores = [core(i) for i in size]
return axisframe(img, cores, mode, convolve)
def gaussian_filter(img, sig=2, mode='reflect'):
if not hasattr(sig, '__len__'): sig = [sig] * img.ndim
def core(s):
if s==0: return np.array([1])
x = np.arange(-int(s*2.5+0.5), int(s*2.5+0.5)+1)
return np.exp(-x**2/2/s**2)/s/(2*np.pi)**0.5
cores = [core(i) for i in sig]
return axisframe(img, cores, mode, convolve)
def _maximum_filter(input, weight=None, output=None, mode='reflect', keeptype=True):
def f(buf, output, w):
if w>0: np.maximum(buf, output, out=output)
return convframe(input, weight, output, None, mode, None, keeptype, f)
def maximum_filter(input, size=None, footprint=None, output=None, mode='reflect', keeptype=True):
if not footprint is None:
return _maximum_filter(input, footprint, output, mode)
if not hasattr(size, '__len__'): size = [size]*input.ndim
cores = [np.ones(i, 'bool') for i in size]
return axisframe(input, cores, mode, _maximum_filter)
def _minimum_filter(input, weight=None, output=None, mode='reflect', keeptype=True):
def f(buf, output, w):
if w>0: np.minimum(buf, output, out=output)
return convframe(input, weight, output, None, mode, None, keeptype, f)
def minimum_filter(input, size=None, footprint=None, output=None, mode='reflect', keeptype=True):
if not footprint is None:
return _minimum_filter(input, footprint, output, mode)
if not hasattr(size, '__len__'): size = [size]*input.ndim
cores = [np.ones(i, 'bool') for i in size]
return axisframe(input, cores, mode, _minimum_filter)
if __name__ == '__main__':
from skimage.data import camera
import matplotlib.pyplot as plt
img = camera()
simg = minimum_filter(img, footprint=np.ones((10,10)))
plt.imshow(simg, cmap='gray')
plt.show()
|
nilq/baby-python
|
python
|
import unittest
from user import User
class UserTest(unittest.TestCase):
"""
    Test class that defines test cases for the User class behaviours.
Args:
unittest.TestCase: Inherits the testCase class that helps in creating test cases
"""
def setUp(self):
"""
Set up method to run before each test cases.
"""
self.new_user = User("user100", "1100")
def test_init(self):
"""
test_init test case to test if the object is initialized properly
"""
self.assertEqual(self.new_user.login_name, "user100")
self.assertEqual(self.new_user.pin, "1100")
def test_save_user(self):
"""
test_save_user test case to test if the user object is saved into
the user list
"""
self.new_user.save_user()
self.assertEqual(len(User.user_list),1)
def test_user_auth(self):
"""
test_user_auth tests case to authenticate the user
"""
self.assertTrue(self.new_user.user_auth("user100","1100"))
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.8 on 2018-08-12 16:09
from django.db import migrations, models
from django_add_default_value import AddDefaultValue
class Migration(migrations.Migration):
dependencies = [("dadv", "0001_initial")]
operations = [
migrations.CreateModel(
name="TestTextDefault",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
("description", models.TextField(default="No description provided")),
],
),
AddDefaultValue(
model_name="TestTextDefault",
name="description",
value="No description provided",
),
]
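# Note: AddDefaultValue (from django-add-default-value) also writes the default
# into the database schema as a DB-level DEFAULT, which Django's built-in
# migration operations do not emit on their own.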
|
nilq/baby-python
|
python
|
import boto3
import json
from datetime import datetime, timedelta
from botocore.client import Config
def handler(event, context):
s3 = boto3.client('s3', config=Config(signature_version='s3v4'))
    BUCKET_NAME = 'photostorage113550-dev'
s3_bucket_content = s3.list_objects(Bucket=BUCKET_NAME)['Contents']
contents = []
for obj in s3_bucket_content:
key = obj['Key'].replace('.jpg', '')
params = {'Bucket': BUCKET_NAME, 'Key': obj['Key']}
        date = obj['LastModified']  # timezone-aware datetime (UTC) from S3
print(date)
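        # Presigned GET URL for this object, valid for 600 seconds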
url = s3.generate_presigned_url('get_object', params, ExpiresIn=600)
contents.append({
'key': key,
'date': date.strftime("%d-%b-%Y %H:%M:%S"),
'url': url
})
return {
'contents': contents
}
|
nilq/baby-python
|
python
|
import collections
import logging
import os
import time
import suds.xsd.doctor
import suds.client
from suds.plugin import MessagePlugin
from suds import WebFault
from . import base
logger = logging.getLogger(__name__)
# Suds has broken array marshaling. See these links:
# http://stackoverflow.com/questions/3519818/suds-incorrect-marshaling-of-array-of-arrays
# https://fedorahosted.org/suds/ticket/340
class FixArrayPlugin(MessagePlugin):
def marshalled(self, context):
command = context.envelope.getChild('Body').getChildren()[0]
# TODO: instead of blacklisting the affected types here, check the
# actual WSDL and fix up any *ArrayArray types.
affected = ('addNodes',
'addDrainingNodes',
'removeNodes',
'removeDrainingNodes',
'disableNodes',
'enableNodes',
'addPool',
)
if command.name in affected:
context.envelope.addPrefix(
'xsd', 'http://www.w3.org/1999/XMLSchema',
)
child_spec = collections.defaultdict(
lambda: 'values',
addPool='nodes', disableNodes='nodes')
values = command.getChild(child_spec[command.name])
values.set('SOAP-ENC:arrayType', 'xsd:list[1]')
values.set('xsi:type', 'SOAP-ENC:Array')
item = values[0]
item.set('SOAP-ENC:arrayType', 'xsd:list[%s]' % len(item.children))
item.set('xsi:type', 'SOAP-ENC:Array')
class StingrayBalancer(base.Balancer):
def __init__(self, config):
self.url = config['URL']
imp = suds.xsd.doctor.Import(
'http://schemas.xmlsoap.org/soap/encoding/')
imp.filter.add('http://soap.zeus.com/zxtm/1.0/')
doctor = suds.xsd.doctor.ImportDoctor(imp)
# zxtm_pool.wsdl must be present in the same directory as this file.
here = os.path.dirname(os.path.realpath(__file__))
wsdl = os.path.join(here, 'stingray_pool.wsdl')
self.client = suds.client.Client(
'file:' + wsdl,
username=config['USER'], password=config['PASSWORD'],
location=self.url, plugins=[doctor, FixArrayPlugin()])
# All pool names will be prefixed with this string.
self.pool_prefix = config.get('POOL_PREFIX', '')
# Stingray has separate calls for disableNodes and removeNodes. The
# latter will interrupt current connections. To minimize disruption,
# we'll call disableNodes first, wait a configurable amount of time,
# and then call removeNodes.
self.grace_period = config.get('GRACE_PERIOD', 2)
def _call_node_func(self, func, pool, nodes):
# Generic function for calling any of the Stingray pool functions that
# accept an array of pools, and an arrayarray of nodes. This function
# will take a single pool and nodelist and do all the necessary
# wrapping.
nodes_wrapper = self.client.factory.create('StringArrayArray')
nodes_array = self.client.factory.create('StringArray')
nodes_array.item = nodes
nodes_wrapper.item = [nodes_array]
func([self.pool_prefix + pool], nodes_wrapper)
def add_nodes(self, pool, nodes):
# Stingray will kindly avoid creating duplicates if you submit a node
# that is already in the pool.
logger.info('Adding nodes %s to pool %s', nodes, pool)
try:
self._call_node_func(self.client.service.addNodes, pool, nodes)
except WebFault as wf:
if 'Unknown pool' in wf.message:
# If pool doesn't exist, create it.
self.add_pool(pool, nodes)
else:
raise
def delete_nodes(self, pool, nodes):
existing_nodes = set(self.get_nodes(pool))
nodes = list(existing_nodes.intersection(nodes))
if not nodes:
logger.info('No nodes to delete from pool %s', pool)
return
logger.info('Deleting nodes %s from pool %s', nodes, pool)
try:
self._call_node_func(self.client.service.disableNodes, pool, nodes)
# wait <grace_period> seconds for connections to finish before
# zapping nodes completely.
time.sleep(self.grace_period)
self._call_node_func(self.client.service.removeNodes, pool, nodes)
except WebFault as wf:
if 'Unknown pool' in wf.message:
# If you try to delete nodes from a pool, and it doesn't exist,
# that's fine.
pass
else:
raise
# Clean up pool in StingRay
self.delete_pool_if_empty(pool)
def add_pool(self, pool, nodes):
logger.info('Adding new pool %s', pool)
self._call_node_func(self.client.service.addPool, pool, nodes)
def delete_pool(self, pool):
logger.info('Deleting pool %s', pool)
try:
self.client.service.deletePool([self.pool_prefix + pool])
except WebFault as wf:
if 'Unknown pool' in str(wf):
pass
else:
raise
def delete_pool_if_empty(self, pool):
nodes = self.get_nodes(pool)
if not nodes:
logger.info('Pool %s is empty', pool)
self.delete_pool(pool)
def get_nodes(self, pool):
logger.info('Getting nodes for pool %s', pool)
try:
# get just the first item from the arrayarray
nodes = self.client.service.getNodes([self.pool_prefix + pool])[0]
except WebFault as wf:
if 'Unknown pool' in wf.message:
return []
else:
raise
# convert the sax text things into real strings
return [str(n) for n in nodes]
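# A minimal usage sketch (hypothetical config values, not from this codebase):
# balancer = StingrayBalancer({'URL': 'https://stingray.example.com:9090/soap',
#                              'USER': 'admin', 'PASSWORD': 'secret',
#                              'POOL_PREFIX': 'vr-', 'GRACE_PERIOD': 2})
# balancer.add_nodes('mypool', ['10.0.0.1:80', '10.0.0.2:80'])
# balancer.delete_nodes('mypool', ['10.0.0.2:80'])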
|
nilq/baby-python
|
python
|
import os
import random
import sys, getopt
def getDesiredROMCount():
#Asks the user how many roms they want to select from, loops until it gets a valid input
asking = True
numFiles = 0
while asking:
try:
numFiles = int(input("Please enter the number of games you'd like randomly selected for analysis: "))
asking = False
except ValueError:
print("Invalid input, please try again")
return numFiles
def getAllFilesInRomDirectory(romDirectory):
#Lists all files in the working directory
all_files = os.listdir(romDirectory)
if(len(all_files) == 1):
print("Please put this script in the directory containing your rom files")
return []
else:
return all_files
def pickROMS(rom_list, count):
    #This function does the randomization from the list of roms obtained
    #Ensures that we don't ask for more ROM files than are available in the folder
    lower = min(len(rom_list), count)
    return [random.choice(rom_list) for _ in range(lower)]
def main(dir, romFormats):
print("Welcome to the Game Randomizer.")
print("You can use this small program to pick a specified number of random ROMS from a folder containing a collection of them.")
numFiles = getDesiredROMCount()
all_files = getAllFilesInRomDirectory(dir)
#Filters the rom files from all the files in the directory
rom_files = list(filter(lambda f: f[-3:] in romFormats, all_files))
if(len(rom_files) == 0):
print("No valid ROM files found")
return
#The main loop of the program - picks roms until the user no longer wants to do that
picking = True
while picking:
selected_files = pickROMS(rom_files, numFiles)
print("\nThe games that have been chosen for you are: ")
for count, fileName in enumerate(selected_files):
print(str(count + 1) + ": " + fileName)
pickAgain = str(input("\nDo you want to pick again(Y/N)? ")).upper()
if pickAgain == 'Y':
picking = True
else:
print("Thank you! Goodbye!")
picking = False
if __name__ == '__main__':
directory = os.getcwd()
romformats = ["zip"]
#Parse the command line arguments
try:
options, arguments = getopt.getopt(sys.argv[1:], "hd:f:", ["help", "directory=", "romformat="])
for opt, arg in options:
if opt in ('-h', "--help"):
print("gamerandomizer.py -d <path to search directory> -f <rom file format>")
print("The default rom file formats that are searched for are zip and smc, but to specify any custom formats, enter them as comma separated values with no spaces eg. 'zip,smc'")
sys.exit()
elif opt in ("-d", "--directory"):
directory = arg
elif opt in ("-f", "--romformat"):
                romformats = arg.split(',')
else:
raise getopt.GetoptError
except (getopt.GetoptError, ValueError):
#If there is an error parsing the arguments, display the error message and quit
print("You have entered invalid command line arguments. Type 'gamerandomizer.py -h' or 'gamerandomizer.py --help' for usage instructions")
sys.exit()
main(directory, romformats)
|
nilq/baby-python
|
python
|
"""Auth namespace contains the class to manage authentication: Credentials.
It also includes the utility functions
:func:`cartoframes.auth.set_default_credentials` and
:func:`cartoframes.auth.get_default_credentials`."""
from __future__ import absolute_import
from .credentials import Credentials
from .defaults import get_default_credentials, set_default_credentials
__all__ = [
'Credentials',
'set_default_credentials',
'get_default_credentials'
]
|
nilq/baby-python
|
python
|
from telnetlib import Telnet
import os
import sys
import time
#1; E e geo eclip 2018-jan-01 00:00 2018-jan-02 00:00 1d
#ASTNAM=1 TABLE_TYPE= 'ELEMENTS e geo eclip START_TIME='2018-jan-01' STOP_TIME='2018-jan-02' STEP_SIZE='1 d'
tn=Telnet('horizons.jpl.nasa.gov', 6775)
#tn.set_debuglevel(10)
for i in range(30):
tn.read_until(b"Horizons>")
tn.write(b"%d;\n"%(i+1))
parametry=tn.read_until(b"?,<cr>:").decode('ascii')
#print(parametry)
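    # "OM=" in Horizons' element output is the longitude of the ascending
    # node (degrees); extract the number that follows it.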
omstart=parametry.find(" OM= ")+5
omend=parametry.find(" ",omstart)
OM= float(parametry[omstart:omend])
print(OM)
tn.write(b"\n")
tn.close()
|
nilq/baby-python
|
python
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A task scheduler for Resolver system node."""
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.utils import status as status_lib
class ResolverTaskScheduler(task_scheduler.TaskScheduler[task_lib.ExecNodeTask]
):
"""A task scheduler for Resolver system node."""
def schedule(self) -> task_scheduler.TaskSchedulerResult:
return task_scheduler.TaskSchedulerResult(
status=status_lib.Status(code=status_lib.Code.OK),
output=task_scheduler.ResolverNodeOutput(
resolved_input_artifacts=self.task.input_artifacts))
def cancel(self) -> None:
pass
|
nilq/baby-python
|
python
|
from __future__ import division
import torch
import pytorch_warpctc
from ._warp_ctc import *
from .validators import validate_inputs
class CTCAutogradFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, activations, labels, lengths, label_lengths, take_average=True, blank=None):
        use_cuda = activations.is_cuda
        validate_inputs(activations, labels, lengths, label_lengths)
        costs = torch.zeros(activations.size()[0])
        activations = torch.transpose(activations, 0, 1).contiguous()
        grads = activations.new(activations.size()).zero_()
        batch_size = activations.shape[1]
        if blank is None:
            blank = activations.size()[-1] - 1
        if use_cuda:
            pytorch_warpctc.gpu_ctc(activations, grads, labels, label_lengths, lengths, batch_size, costs, blank)
        else:
            pytorch_warpctc.cpu_ctc(activations, grads, labels, label_lengths, lengths, batch_size, costs, blank)
        if use_cuda:
            costs = costs.cuda()
        cost = torch.sum(costs)
        grads = grads.transpose_(0, 1).contiguous()
        if take_average is True:
            cost = cost / costs.size(0)
            grads = grads / grads.size()[0]
        ctx.grads = grads
        return costs.new((cost,))

    @staticmethod
    def backward(ctx, grad_output):
        return ctx.grads, None, None, None, None, None

class CTCLoss(torch.nn.Module):
    def __init__(self, take_average=True, blank=None):
        super(CTCLoss, self).__init__()
        self.take_average = take_average
        self.blank = blank

    def forward(self, activations, labels, lengths, label_lengths):
        return CTCAutogradFunction.apply(
            activations,
            labels,
            lengths,
            label_lengths,
            self.take_average,
            self.blank
        )
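# --- Usage sketch (added for illustration; not part of the original module). ---
# Assumes the pytorch_warpctc extension is built and importable; shapes follow
# forward() above: activations are (batch, time, classes), labels are all label
# sequences concatenated into one 1D int tensor.
if __name__ == "__main__":
    loss_fn = CTCLoss(take_average=True)
    acts = torch.randn(2, 50, 20, requires_grad=True)  # batch=2, T=50, C=20
    labels = torch.IntTensor([3, 1, 4, 1, 5, 9])       # two label sequences, concatenated
    lengths = torch.IntTensor([50, 50])                # activation length per sample
    label_lengths = torch.IntTensor([3, 3])            # label length per sample
    cost = loss_fn(acts, labels, lengths, label_lengths)
    cost.backward()
    print(cost.item())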
|
nilq/baby-python
|
python
|
a = 10
|
nilq/baby-python
|
python
|
def fill_matrix(matrix, input_var, option=0):
    for row in range(input_var[0]):
        if option == 1:
            row_input = [int(x) for x in input().split(" ")]
        else:
            row_input = [float(x) for x in input().split(" ")]
        matrix.append(row_input)
    return

def add_matrix(matrix_a, matrix_b, matrix_out, input_var):
    for row_n in range(int(input_var[0])):
        new_row = []
        for column_n in range(int(input_var[1])):
            new_row.append(matrix_a[row_n][column_n] + matrix_b[row_n][column_n])
        matrix_out.append(new_row)
    return

def print_matrix(matrix):
    print("The result is:")
    for row in matrix:
        print(*row)
    return

def multi_matrix_constant(matrix_a, const, matrix_out, input_var):
    for row_n in range(int(input_var[0])):
        new_row = []
        for column_n in range(int(input_var[1])):
            new_row.append(matrix_a[row_n][column_n] * const)
        matrix_out.append(new_row)
    return

def multi_matrix(matrix_a, matrix_b):
    rows = len(matrix_a)     # number of rows in first matrix
    cols = len(matrix_b[0])  # number of columns in second matrix
    # matrix rows x cols filled with 0
    matrix_out = [[0 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            for k in range(len(matrix_b)):
                matrix_out[i][j] += matrix_a[i][k] * matrix_b[k][j]
    return matrix_out

def main_diagonal(matrix):
    rows = len(matrix)     # number of rows in matrix
    cols = len(matrix[0])  # number of columns in matrix
    # the transpose is cols x rows, so swap the dimensions
    matrix_out = [[0 for _ in range(rows)] for _ in range(cols)]
    for i in range(cols):
        for j in range(rows):
            matrix_out[i][j] = matrix[j][i]
    return matrix_out

def side_diagonal(matrix):
    rows = len(matrix)  # number of rows in matrix
    # flip about the anti-diagonal: transpose, then reverse each row and the row order
    matrix_out = main_diagonal(matrix)
    for i in reversed(range(rows)):
        matrix[rows - i - 1] = matrix_out[i][::-1]
    return matrix

def vertical_line(matrix):
    rows = len(matrix)  # number of rows in matrix
    for i in range(rows):
        matrix[i] = matrix[i][::-1]
    return matrix

def horizontal_line(matrix):
    rows = len(matrix)     # number of rows in matrix
    cols = len(matrix[0])  # number of columns in matrix
    # matrix rows x cols filled with 0
    matrix_out = [[0 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        matrix_out[rows - i - 1] = matrix[i][::]
    return matrix_out

def get_matrix_minor(matrix, i, j):
    return [row[:j] + row[j + 1:] for row in (matrix[:i] + matrix[i + 1:])]
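# Determinant via recursive Laplace (cofactor) expansion along the first row.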
def calculate_determinant(matrix):
    rows = len(matrix)  # number of rows in matrix
    matrix_det = 0
    if rows == 1:
        return matrix[0][0]
    if rows == 2:
        return (matrix[0][0] * matrix[1][1]) - (matrix[0][1] * matrix[1][0])
    for c in range(rows):
        matrix_det += ((-1) ** c) * matrix[0][c] * calculate_determinant(get_matrix_minor(matrix, 0, c))
    return matrix_det
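# Note: despite its name, zeros_matrix returns an identity matrix; it seeds the
# Gauss-Jordan elimination in inverse_matrix below.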
def zeros_matrix(rows, cols):
    matrix = []
    for i in range(rows):
        matrix.append([])
        for j in range(cols):
            if j == i:
                matrix[-1].append(1.0)
            else:
                matrix[-1].append(0.0)
    return matrix

def inverse_matrix(AM):
    n = len(AM)
    if calculate_determinant(AM) == 0:
        print("This matrix doesn't have an inverse.")
        return None
    IM = zeros_matrix(n, n)
    fd = 0  # fd stands for focus diagonal OR the current diagonal
    fdScaler = 1. / AM[fd][fd]
    for j in range(n):  # using j to indicate cycling thru columns
        AM[fd][j] = fdScaler * AM[fd][j]
        IM[fd][j] = fdScaler * IM[fd][j]
    indices = list(range(n))
    for i in indices[0:fd] + indices[fd + 1:]:  # *** skip row with fd in it.
        crScaler = AM[i][fd]  # cr stands for "current row".
        for j in range(n):  # cr - crScaler * fdRow, but one element at a time.
            AM[i][j] = AM[i][j] - crScaler * AM[fd][j]
            IM[i][j] = IM[i][j] - crScaler * IM[fd][j]
    indices = list(range(n))  # to allow flexible row referencing ***
    # We've already run for fd = 0, now let's run for fd = 1 to the last fd
    for fd in range(1, n):  # fd stands for focus diagonal
        fdScaler = 1.0 / AM[fd][fd]
        # FIRST: scale fd row with fd inverse.
        for j in range(n):  # Use j to indicate column looping.
            AM[fd][j] *= fdScaler
            IM[fd][j] *= fdScaler
        # SECOND: operate on all rows except fd row.
        for i in indices[:fd] + indices[fd + 1:]:  # *** skip row with fd in it.
            crScaler = AM[i][fd]  # cr stands for "current row".
            for j in range(n):  # cr - crScaler * fdRow, but one element at a time.
                AM[i][j] = AM[i][j] - crScaler * AM[fd][j]
                IM[i][j] = IM[i][j] - crScaler * IM[fd][j]
    # print_matrix already prints the "The result is:" header.
    print_matrix(IM)
    return IM
if __name__ == '__main__':
    choice = ""
    while True:
        fir_mtx = []
        sec_mtx = []
        output_mtx = []
        print("1. Add matrices")
        print("2. Multiply matrix by a constant")
        print("3. Multiply matrices")
        print("4. Transpose matrix")
        print("5. Calculate a determinant")
        print("6. Inverse matrix")
        print("0. Exit")
        choice = int(input("Your choice:"))
        if choice == 0:
            break
        elif choice == 1:
            fir_mtx_input = [int(x) for x in input("Enter size of first matrix:").split(" ")]
            print("Enter first matrix:")
            fill_matrix(fir_mtx, fir_mtx_input)
            sec_mtx_input = [int(x) for x in input("Enter size of second matrix:").split(" ")]
            print("Enter second matrix:")
            fill_matrix(sec_mtx, sec_mtx_input)
            if fir_mtx_input[0] == sec_mtx_input[0] and \
                    fir_mtx_input[1] == sec_mtx_input[1]:
                add_matrix(fir_mtx, sec_mtx, output_mtx, fir_mtx_input)
                print_matrix(output_mtx)
            else:
                print("ERROR")
        elif choice == 2:
            fir_mtx_input = [int(x) for x in input("Enter size of matrix:").split(" ")]
            print("Enter matrix:")
            fill_matrix(fir_mtx, fir_mtx_input)
            constant = int(input("Enter constant:"))
            multi_matrix_constant(fir_mtx, constant, output_mtx, fir_mtx_input)
            print_matrix(output_mtx)
        elif choice == 3:
            fir_mtx_input = [int(x) for x in input("Enter size of first matrix:").split(" ")]
            print("Enter first matrix:")
            fill_matrix(fir_mtx, fir_mtx_input)
            sec_mtx_input = [int(x) for x in input("Enter size of second matrix:").split(" ")]
            print("Enter second matrix:")
            fill_matrix(sec_mtx, sec_mtx_input)
            if fir_mtx_input[1] != sec_mtx_input[0]:
                print("The operation cannot be performed.\n")
            else:
                output_mtx = multi_matrix(fir_mtx, sec_mtx)
                print_matrix(output_mtx)
        elif choice == 4:
            print("1. Main diagonal")
            print("2. Side diagonal")
            print("3. Vertical line")
            print("4. Horizontal line")
            choice = int(input("Your choice:"))
            fir_mtx_input = [int(x) for x in input("Enter matrix size:").split(" ")]
            print("Enter matrix:")
            fill_matrix(fir_mtx, fir_mtx_input)
            if choice == 1:
                output_mtx = main_diagonal(fir_mtx)
            elif choice == 2:
                output_mtx = side_diagonal(fir_mtx)
            elif choice == 3:
                output_mtx = vertical_line(fir_mtx)
            elif choice == 4:
                output_mtx = horizontal_line(fir_mtx)
            print_matrix(output_mtx)
        elif choice == 5:
            fir_mtx_input = [int(x) for x in input("Enter size of first matrix:").split(" ")]
            print("Enter first matrix:")
            fill_matrix(fir_mtx, fir_mtx_input)
            matrix_det = calculate_determinant(fir_mtx)
            print("The result is:")
            print(matrix_det)
        elif choice == 6:
            fir_mtx_input = [int(x) for x in input("Enter size of first matrix:").split(" ")]
            print("Enter matrix:")
            fill_matrix(fir_mtx, fir_mtx_input)
            inverse_matrix(fir_mtx)
|
nilq/baby-python
|
python
|
from django.contrib.auth.models import User
from rest_framework import serializers
from ..models import Game
class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'is_staff')

class GameSerializer(serializers.ModelSerializer):
    creator = UserSerializer(read_only=True)
    player1_score = serializers.IntegerField(read_only=True)
    player2_score = serializers.IntegerField(read_only=True)

    class Meta:
        model = Game
        fields = ('id', 'created', 'creator',
                  'player1', 'player2',
                  'player1_score', 'player2_score')
|
nilq/baby-python
|
python
|
extensions = ['sphinx.ext.autosectionlabel']
autosectionlabel_prefix_document = True
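# With the document prefix enabled, sections can be cross-referenced as
# :ref:`path/to/document:Section Title`.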
|
nilq/baby-python
|
python
|
from fastapi.testclient import TestClient
from main import app
from unittest import TestCase, mock
from persistence.repositories.question_template_repository_postgres import QuestionTemplateRepositoryPostgres
from infrastructure.db.question_template_schema import QuestionTemplate, QuestionTypeEnum
import json
import os
apikey = os.getenv("API_KEY")
qtrp = QuestionTemplateRepositoryPostgres()
client = TestClient(app)
# Post
post_header = {"apikey": apikey, "exam_template_id": "5122b737-f815-4e15-a56d-abbff2fee900"}
post_body = {
    "question": "What is 1 + 1?",
}
# Get
get_header = {
    "apikey": apikey,
    "exam_template_id": "5122b737-f815-4e15-a56d-abbff2fee900",
    "question_template_id": "2be97039-8c07-48ae-a18e-16d4779b977b",
}
return_from_get = QuestionTemplate(
    id="2be97039-8c07-48ae-a18e-16d4779b977b",
    exam_id="5122b737-f815-4e15-a56d-abbff2fee900",
    question="What is 1 + 1?",
    question_type=QuestionTypeEnum.written,
    options=None,
    correct=None,
    value=1,
)
# Get all by exam template id
get_all_by_exam_id_header = {"apikey": apikey, "exam_template_id": "5122b737-f815-4e15-a56d-abbff2fee900"}
return_from_get_all_by_exam_id = [
    QuestionTemplate(
        id="2be97039-8c07-48ae-a18e-16d4779b977b",
        exam_id="5122b737-f815-4e15-a56d-abbff2fee900",
        question="What is 1 + 1?",
        question_type=QuestionTypeEnum.written,
        options=None,
        correct=None,
        value=1,
    )
]
# Delete
delete_header = {
    "apikey": apikey,
    "exam_template_id": "5122b737-f815-4e15-a56d-abbff2fee900",
    "question_template_id": "2be97039-8c07-48ae-a18e-16d4779b977b",
}
return_from_delete = None
# Update
update_header = {
    "apikey": apikey,
    "exam_template_id": "5122b737-f815-4e15-a56d-abbff2fee900",
    "question_template_id": "2be97039-8c07-48ae-a18e-16d4779b977b",
}
update_body = {
    "question": "What is 1 * 1?",
    "value": 2,
}
class QuestionTemplateMock(TestCase):
    @mock.patch.object(QuestionTemplateRepositoryPostgres, "add_question_template")
    def test_create_question_template(self, mock_post):
        mock_post.return_value = None
        exam_template_id = "5122b737-f815-4e15-a56d-abbff2fee900"
        response = client.post(f"/exams/{exam_template_id}/questions/", data=json.dumps(post_body), headers=post_header)
        assert response.status_code == 201, response.text
        data = response.json()
        assert data["exam_id"] == exam_template_id
        assert data["question"] == "What is 1 + 1?"
        assert data["question_type"] == "written"
        assert data["options"] is None
        assert data["correct"] is None
        assert data["value"] == 1

    @mock.patch.object(QuestionTemplateRepositoryPostgres, "get_question_template")
    def test_get_question_template(self, mock_get):
        mock_get.return_value = return_from_get
        exam_template_id = "5122b737-f815-4e15-a56d-abbff2fee900"
        question_template_id = "2be97039-8c07-48ae-a18e-16d4779b977b"
        response = client.get(f"/exams/{exam_template_id}/questions/{question_template_id}", headers=get_header)
        assert response.status_code == 200, response.text
        data = response.json()
        assert data["id"] == question_template_id
        assert data["exam_id"] == exam_template_id
        assert data["question"] == "What is 1 + 1?"
        assert data["question_type"] == "written"
        assert data["options"] is None
        assert data["correct"] is None
        assert data["value"] == 1

    @mock.patch.object(QuestionTemplateRepositoryPostgres, "get_all_question_templates_by_exam_template_id")
    def test_get_all_by_exam_id(self, mock_get_all_by_exam_id):
        mock_get_all_by_exam_id.return_value = return_from_get_all_by_exam_id
        exam_template_id = "5122b737-f815-4e15-a56d-abbff2fee900"
        question_template_id = "2be97039-8c07-48ae-a18e-16d4779b977b"
        response = client.get(f"/exams/{exam_template_id}/questions/", headers=get_all_by_exam_id_header)
        assert response.status_code == 200, response.text
        data = response.json()
        assert data["exam_template_id"] == exam_template_id
        assert data["amount"] == 1
        assert data["question_templates"][0]["id"] == question_template_id
        assert data["question_templates"][0]["exam_id"] == exam_template_id
        assert data["question_templates"][0]["question"] == "What is 1 + 1?"
        assert data["question_templates"][0]["question_type"] == "written"
        assert data["question_templates"][0]["options"] is None
        assert data["question_templates"][0]["correct"] is None
        assert data["question_templates"][0]["value"] == 1

    @mock.patch.object(QuestionTemplateRepositoryPostgres, "delete_question_template")
    @mock.patch.object(QuestionTemplateRepositoryPostgres, "get_question_template")
    def test_delete_question_template(self, mock_get, mock_delete):
        mock_get.return_value = return_from_get
        mock_delete.return_value = return_from_delete
        exam_template_id = "5122b737-f815-4e15-a56d-abbff2fee900"
        question_template_id = "2be97039-8c07-48ae-a18e-16d4779b977b"
        response = client.delete(f"/exams/{exam_template_id}/questions/{question_template_id}", headers=delete_header)
        assert response.status_code == 200, response.text
        data = response.json()
        assert data["message"] == f"The question template {question_template_id} was deleted successfully"

    @mock.patch.object(QuestionTemplateRepositoryPostgres, "update_question_template")
    @mock.patch.object(QuestionTemplateRepositoryPostgres, "get_question_template")
    def test_update_question_template(self, mock_get, mock_update):
        mock_get.return_value = return_from_get
        mock_update.return_value = None
        exam_template_id = "5122b737-f815-4e15-a56d-abbff2fee900"
        question_template_id = "2be97039-8c07-48ae-a18e-16d4779b977b"
        response = client.patch(
            f"/exams/{exam_template_id}/questions/{question_template_id}", data=json.dumps(update_body), headers=update_header
        )
        assert response.status_code == 200, response.text
        data = response.json()
        assert data["id"] == question_template_id
        assert data["exam_id"] == exam_template_id
        assert data["question"] == "What is 1 * 1?"
        assert data["question_type"] == "written"
        assert data["options"] is None
        assert data["correct"] is None
        assert data["value"] == 2
|
nilq/baby-python
|
python
|
# coding=utf-8
import time
import re
import zlib
import random
from gzip import GzipFile
from PIL import Image
# Compatibility shim for Python 2.7 and 3.x
try:
    from io import BytesIO as StringIO
except ImportError:
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
'''
Utility module for the Baidu Cloud engine.
'''
def get_time():
    '''
    Get the current timestamp.
    Returns:
        The current Unix timestamp as a string.
    '''
    return str(int(time.time()))
def get_json_from_response(response):
    '''
    Extract the JSON payloads from a JSONP response.
    Args:
        response: the response body text
    Returns:
        A list of the matched JSON object strings.
    '''
    return re.findall(r'\(({[\s\S]*?})\)', response)
def deflate_decode(data):
    '''
    Decode deflate-compressed data.
    Args:
        data: the compressed data
    Returns:
        The decompressed data.
    '''
    try:
        return zlib.decompress(data, -zlib.MAX_WBITS)
    except zlib.error:
        return zlib.decompress(data)
def gzip_decode(data):
    '''
    Decode gzip-compressed data.
    Args:
        data: the compressed data
    Returns:
        The decompressed data.
    '''
    buf = StringIO(data)
    f = GzipFile(fileobj=buf)
    return f.read()
last_msg = ''
def show_msg(msg):
    global last_msg
    last_msg = msg
    print(msg)
def get_callback_function():
    '''
    Generate a random JSONP callback function name.
    :returns: a six-character callback name
    '''
    chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
             'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
             'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
             'u', 'v', 'w', 'x', 'y', 'z']
    return ''.join(random.sample(chars, 6))
def get_gid_char(range_start):
    '''
    Generate one random character of the gid.
    Args:
        range_start: lower bound of the random hex range
    Returns:
        A single uppercase hex character.
    '''
    char = hex(random.randint(range_start, 15))
    char = char[2:].upper()
    return char
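if __name__ == '__main__':
    # Ad-hoc smoke test (added; not part of the original module): exercise the
    # pure helpers above.
    print(get_time())
    print(get_callback_function())
    print(get_gid_char(0))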
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Application: COMPOSE Framework - K-Nearest Neighbors Algorithm
File name: knn.py
Author: Martin Manuel Lopez
Creation: 10/20/2021
The University of Arizona
Department of Electrical and Computer Engineering
College of Engineering
"""
# MIT License
#
# Copyright (c) 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pandas as pd
import numpy as np
import scipy as sp
import math
import random as rd
class KNN:
    def __init__(self, data, n_folds) -> None:
        self.data = pd.DataFrame(data)
        self.N_features = np.shape(self.data)[1]
        self.n_folds = n_folds  # 5 fold cross validation
    ## KNN algorithm
    # Find the min and max values for each column
    def dataset_minmax(self):
        dataset = np.array(self.data)
        minmax = list()
        for i in range(len(dataset[0])):
            col_values = [row[i] for row in dataset]
            value_min = min(col_values)
            value_max = max(col_values)
            minmax.append([value_min, value_max])
        return minmax
    # Rescale dataset columns to the range 0-1
    def normalize_dataset(self, dataset, minmax):
        for row in dataset:
            for i in range(len(row)):
                row[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])
    # Split a dataset into k folds
    def cross_validation_split(self, dataset, n_folds):
        dataset_split = list()
        dataset_copy = list(dataset)
        fold_size = int(len(dataset) / n_folds)
        for _ in range(n_folds):
            fold = list()
            while len(fold) < fold_size:
                index = rd.randrange(len(dataset_copy))
                fold.append(dataset_copy.pop(index))
            dataset_split.append(fold)
        dataset_array = np.array(dataset_split)
        return dataset_array.tolist()
    # Calculate accuracy percentage
    def accuracy_metric(self, actual, predicted):
        correct = 0
        for i in range(len(actual)):
            if actual[i] == predicted[i]:
                correct += 1
        return correct / float(len(actual)) * 100.0
    def euclidean_distance(self, row1, row2):
        distance = 0.0
        for i in range(len(row1) - 1):
            distance += (row1[i] - row2[i]) ** 2
        return math.sqrt(distance)
    def get_nearest_neighbors(self, train, test_row, num_neighbors):
        distances = list()
        for train_row in train:
            dist = self.euclidean_distance(test_row, train_row)
            distances.append((train_row, dist))
        distances.sort(key=lambda tup: tup[1])
        neighbors = list()
        for i in range(num_neighbors):
            neighbors.append(distances[i][0])
        return neighbors
    def predict_classification(self, train, test_row, num_neighbors):
        neighbors = self.get_nearest_neighbors(train, test_row, num_neighbors)
        output_values = [row[-1] for row in neighbors]
        prediction = max(set(output_values), key=output_values.count)
        return prediction
    def k_nearest_neighbors(self, train, test, num_neighbors):
        predictions = list()
        for row in test:
            output = self.predict_classification(train, row, num_neighbors)
            predictions.append(output)
        return predictions
    def knn_run(self, option):
        dataset = np.array(self.data)
        folds = self.cross_validation_split(dataset, self.n_folds)
        scores = []
        knn_distances = []
        accuracies = []
        for fold in folds:
            train_set = list(folds)
            train_set.remove(fold)
            train_set = sum(train_set, [])
            test_set = list()
            for row in fold:
                row_copy = list(row)
                test_set.append(row_copy)
                row_copy[-1] = None
            predicted_dist = self.k_nearest_neighbors(train_set, test_set, self.N_features)
            actual = [row[-1] for row in fold]
            accuracy = self.accuracy_metric(actual, predicted_dist)
            scores.append(accuracy)
            knn_distances.append(predicted_dist)
            accuracies.append(accuracy)
        if option == 'scores':
            return scores
        elif option == 'knn_dist':
            return knn_distances
        elif option == 'knn_accuracy':
            return accuracies  # the per-fold list, not just the last fold's value
        else:
            return "KNN can only return: 'scores', 'knn_dist', or 'knn_accuracy'. Please reselect KNN options"
|
nilq/baby-python
|
python
|
import csv
import logging
import os
import string
import numpy as np
import tensorflow as tf
from gensim.models import KeyedVectors
from sklearn.metrics.pairwise import cosine_similarity
from keyed_vectors_prediction_config import KeyedVectorsPredictionConfig
class KeyedVectorsFormatPredictor:
    def __init__(self):
        tf.logging.set_verbosity(tf.logging.INFO)
        self.config = KeyedVectorsPredictionConfig()
        logging.info("loading keyed vectors file")
        if self.config.bin_file:
            self.word2vec = KeyedVectors.load(self.config.keyed_vectors_model)
        else:
            self.word2vec = KeyedVectors.load_word2vec_format(self.config.keyed_vectors_model)
        logging.info("keyed vectors loaded")
    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        with tf.gfile.Open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines
    @classmethod
    def _create_context_pairs(cls, lines):
        examples = []
        for (i, line) in enumerate(lines):
            text_a = line[4]
            text_b = line[5]
            examples.append(
                ContextPair(text_a=text_a, text_b=text_b))
        return examples
    def predict(self, dir_in, filename, dir_out):
        context_pairs = self._create_context_pairs(
            self._read_tsv(os.path.join(dir_in, filename)))
        result = []
        for context_pair in context_pairs:
            words_a = self.get_words(context_pair.text_a)
            words_b = self.get_words(context_pair.text_b)
            similarity = float(0)
            if len(words_a) > 0 and len(words_b) > 0:
                text_a_embedding = self.get_mean_vector(words_a)
                text_b_embedding = self.get_mean_vector(words_b)
                similarity = cosine_similarity([text_a_embedding], [text_b_embedding])[0][0]
            result.append(similarity)
        output_predict_file = os.path.join(dir_out,
                                           "{0}_result.tsv".format(self.get_filename_without_extension(filename)))
        with tf.gfile.GFile(output_predict_file, "w") as writer:
            tf.logging.info("***** Predict results *****")
            for (i, similarity) in enumerate(result):
                output_line = str(similarity) + "\n"
                writer.write(output_line)
    @classmethod
    def get_words(cls, text):
        return text.lower().translate(str.maketrans('', '', string.punctuation)).split()
    def get_mean_vector(self, words):
        words = [word for word in words if word in self.word2vec.vocab]
        if len(words) >= 1:
            return np.mean(self.word2vec[words], axis=0)
        else:
            return []
    @classmethod
    def get_filename_without_extension(cls, filename):
        return os.path.splitext(filename)[0]
class ContextPair(object):
    def __init__(self, text_a, text_b):
        self.text_a = text_a
        self.text_b = text_b
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import banners
from constants import *
from scipy import stats
#%% Model parameters
n=1 # Number successes
p_cons = banners.DEFAULT_EVENT_RATES.fiveStarCons  # * banners.DEFAULT_EVENT_RATES.fiveStarPriorityRate  # Probability of success
primo_spend = 181
usd_spend = 200
num_pulls = 0 # Existing wish items
primo_spend += usd_spend * PRIMO_PER_USD_BEST
num_pulls += primo_spend // WISH_PRIMO_COST
print(f"Probability assuming a total of {num_pulls} pulls.")
## Simple Model
simple_model = stats.binom(n=num_pulls,p=p_cons)
## Complex (Speculative) Model
class genshin_speculative(stats.rv_discrete):
    """Speculative model for the rates of Genshin Impact. Based on formula reverse engineered by the
    Chinese playerbase. Models a soft pity system that kicks in at a given point."""
    def __init__(self, base, soft_threshold, soft_increase, hard_threshold):
        xk = np.arange(hard_threshold + 1)
        # Per-pull success rate: guaranteed at hard pity, bumped past the soft
        # threshold (keeping the original flat base + soft_increase bump), base otherwise.
        f = np.vectorize(lambda x: 1.0 if x == hard_threshold
                         else (base + soft_increase if x > soft_threshold else base))
        rates = f(xk)
        # rv_discrete requires the pk to sum to 1, so convert the per-pull rates
        # into the distribution of the pull on which the 5-star drops:
        # P(X = x) = rate(x) * prod_{k < x} (1 - rate(k)).
        pk = rates * np.concatenate(([1.0], np.cumprod(1.0 - rates[:-1])))
        super().__init__(values=(xk, pk))
complex_model = genshin_speculative(banners.DEFAULT_EVENT_RATES.fiveStarCons,
                                    banners.DEFAULT_EVENT_RATES.fiveStarPityRampThresh,
                                    banners.DEFAULT_EVENT_RATES.fiveStarSoftPityRate,
                                    banners.DEFAULT_EVENT_RATES.fiveStarPity)
#%% Expected number of copies of limited 5 star character.
mean_rolls_per_char = stats.nbinom.mean(n=n, p=p_cons)
print(f"On average, you would need {mean_rolls_per_char} pulls to get {n} copies of the limited character.")
print(f"This is equal to {mean_rolls_per_char * WISH_PRIMO_COST} gems or ${(mean_rolls_per_char * WISH_PRIMO_COST) / PRIMO_PER_USD_BEST:.2f}")
successes = np.arange(0, 11)
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
ax1 = axs[0]
ax2 = axs[1]
distribution = complex_model
print(distribution.cdf(1))
ax1.bar(successes, 100 * distribution.pmf(successes))
ax2.bar(successes, 100 * (1 - distribution.cdf(successes - 1)))
# ppf takes a percentile and returns the value at that percentile
# ax2.plot(successes,stats.binom.ppf(q=successes, n=num_pulls, p=p))
# Format the plot
start, end = ax1.get_xlim()
ax1.set_xlabel(xlabel="Copies of 5 Star Limited Character")
ax1.xaxis.set_ticks(successes)
ax1.set_ylim(0, 100)
ax2.set_ylim(0, 100)
ax1.set_ylabel(ylabel="Exactly this many")
ax2.set_ylabel(ylabel="At least this many")
plt.show()
|
nilq/baby-python
|
python
|
"""Routing manager classes for tracking and inspecting routing records."""
import json
from typing import Sequence
from ...config.injection_context import InjectionContext
from ...error import BaseError
from ...messaging.util import time_now
from ...storage.base import BaseStorage, StorageRecord
from ...storage.error import StorageError, StorageDuplicateError, StorageNotFoundError
from .messages.route_update_request import RouteUpdateRequest
from .models.route_record import RouteRecord
from .models.route_update import RouteUpdate
from .models.route_updated import RouteUpdated
class RoutingManagerError(BaseError):
    """Generic routing error."""

class RouteNotFoundError(RoutingManagerError):
    """Requested route was not found."""

class RoutingManager:
    """Class for handling routing records."""
    RECORD_TYPE = "forward_route"
    def __init__(self, context: InjectionContext):
        """
        Initialize a RoutingManager.
        Args:
            context: The context for this manager
        """
        self._context = context
        if not context:
            raise RoutingManagerError("Missing request context")
    @property
    def context(self) -> InjectionContext:
        """
        Accessor for the current request context.
        Returns:
            The request context for this connection
        """
        return self._context
    async def get_recipient(self, recip_verkey: str) -> RouteRecord:
        """
        Resolve the recipient for a verkey.
        Args:
            recip_verkey: The verkey ("to") of the incoming Forward message
        Returns:
            The `RouteRecord` associated with this verkey
        """
        storage: BaseStorage = await self._context.inject(BaseStorage)
        try:
            record = await storage.search_records(
                self.RECORD_TYPE, {"recipient_key": recip_verkey}
            ).fetch_single()
        except StorageDuplicateError:
            raise RouteNotFoundError(
                "Duplicate routes found for verkey: {}".format(recip_verkey)
            )
        except StorageNotFoundError:
            raise RouteNotFoundError(
                "No route defined for verkey: {}".format(recip_verkey)
            )
        value = json.loads(record.value)
        return RouteRecord(
            record_id=record.id,
            connection_id=record.tags["connection_id"],
            recipient_key=record.tags["recipient_key"],
            created_at=value.get("created_at"),
            updated_at=value.get("updated_at"),
        )
    async def get_routes(
        self, client_connection_id: str = None, tag_filter: dict = None
    ) -> Sequence[RouteRecord]:
        """
        Fetch all routes associated with the current connection.
        Args:
            client_connection_id: The ID of the connection record
            tag_filter: An optional dictionary of tag filters
        Returns:
            A sequence of route records found by the query
        """
        filters = {}
        if client_connection_id:
            filters["connection_id"] = client_connection_id
        if tag_filter:
            for key in ("recipient_key",):
                if key not in tag_filter:
                    continue
                val = tag_filter[key]
                if isinstance(val, str):
                    filters[key] = val
                elif isinstance(val, list):
                    filters[key] = {"$in": val}
                else:
                    raise RoutingManagerError(
                        "Unsupported tag filter: '{}' = {}".format(key, val)
                    )
        results = []
        storage: BaseStorage = await self._context.inject(BaseStorage)
        async for record in storage.search_records(self.RECORD_TYPE, filters):
            value = json.loads(record.value)
            value.update(record.tags)
            results.append(RouteRecord(**value))
        return results
    async def create_route_record(
        self, client_connection_id: str = None, recipient_key: str = None
    ) -> RouteRecord:
        """
        Create and store a new RouteRecord.
        Args:
            client_connection_id: The ID of the connection record
            recipient_key: The recipient verkey of the route
        Returns:
            The new routing record
        """
        if not client_connection_id:
            raise RoutingManagerError("Missing client_connection_id")
        if not recipient_key:
            raise RoutingManagerError("Missing recipient_key")
        value = {"created_at": time_now(), "updated_at": time_now()}
        record = StorageRecord(
            self.RECORD_TYPE,
            json.dumps(value),
            {"connection_id": client_connection_id, "recipient_key": recipient_key},
        )
        storage: BaseStorage = await self._context.inject(BaseStorage)
        await storage.add_record(record)
        result = RouteRecord(
            record_id=record.id,
            connection_id=client_connection_id,
            recipient_key=recipient_key,
            created_at=value["created_at"],
            updated_at=value["updated_at"],
        )
        return result
    async def delete_route_record(self, route: RouteRecord):
        """Remove an existing route record."""
        if route and route.record_id:
            storage: BaseStorage = await self._context.inject(BaseStorage)
            await storage.delete_record(
                StorageRecord(None, None, None, route.record_id)
            )
    async def update_routes(
        self, client_connection_id: str, updates: Sequence[RouteUpdate]
    ) -> Sequence[RouteUpdated]:
        """
        Update routes associated with the current connection.
        Args:
            client_connection_id: The ID of the connection record
            updates: The sequence of route updates (create/delete) to perform.
        """
        exist_routes = await self.get_routes(client_connection_id)
        exist = {}
        for route in exist_routes:
            exist[route.recipient_key] = route
        updated = []
        for update in updates:
            result = RouteUpdated(
                recipient_key=update.recipient_key, action=update.action
            )
            recip_key = update.recipient_key
            if not recip_key:
                result.result = result.RESULT_CLIENT_ERROR
            elif update.action == update.ACTION_CREATE:
                if recip_key in exist:
                    result.result = result.RESULT_NO_CHANGE
                else:
                    try:
                        await self.create_route_record(client_connection_id, recip_key)
                    except RoutingManagerError:
                        result.result = result.RESULT_SERVER_ERROR
                    else:
                        result.result = result.RESULT_SUCCESS
            elif update.action == update.ACTION_DELETE:
                if recip_key in exist:
                    try:
                        await self.delete_route_record(exist[recip_key])
                    except StorageError:
                        result.result = result.RESULT_SERVER_ERROR
                    else:
                        result.result = result.RESULT_SUCCESS
                else:
                    result.result = result.RESULT_NO_CHANGE
            else:
                result.result = result.RESULT_CLIENT_ERROR
            updated.append(result)
        return updated
    async def send_create_route(
        self, router_connection_id: str, recip_key: str, outbound_handler
    ):
        """Create and send a route update request.
        Returns: the current routing state (request or done)
        """
        msg = RouteUpdateRequest(
            updates=[
                RouteUpdate(recipient_key=recip_key, action=RouteUpdate.ACTION_CREATE)
            ]
        )
        await outbound_handler(msg, connection_id=router_connection_id)
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, DateField, SelectField
from wtforms.validators import DataRequired, Optional
from wotd.models import PartOfSpeech
class WordForm(FlaskForm):
    word = StringField('Word', validators=[DataRequired()])
    part_o_speech = SelectField('Part of Speech', coerce=int)
    definition = TextAreaField('Definition', validators=[DataRequired()])
    exampleSentence = TextAreaField('Example Sentence', validators=[DataRequired()])
    ipa = StringField('Pronunciation', validators=[DataRequired()])
    date_published = DateField('Publish Date', validators=[Optional(strip_whitespace=True)])
    submit = SubmitField('Submit')
    def get_parts_of_speech(self):
        self.part_o_speech.choices = [(-1, 'Select...')] \
            + [(p.id, p.partOfSpeech) for p in PartOfSpeech.query.order_by('id')]
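# Typical usage in a view (added sketch; the surrounding route is an assumption):
#     form = WordForm()
#     form.get_parts_of_speech()  # populate the SelectField before rendering/validating
#     if form.validate_on_submit():
#         ...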
|
nilq/baby-python
|
python
|
import re
import os
import argparse
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from dataloader import LOSO_sequence_generate
# Selected action units
AU_CODE = [1, 2, 4, 10, 12, 14, 15, 17, 25]
AU_DICT = {
number: idx
for idx, number in enumerate(AU_CODE)
}
def evaluate_adj(df, args):
assert isinstance(df, (str, pd.DataFrame)), "Type not supported"
if isinstance(df, str):
# Read in data
df = pd.read_csv(args.csv_name)
# Take out the `Action Units` Columns
data = df.loc[:, "Action Units"]
# Create a blank matrix for counting the adjacent
count_matrix = np.zeros((9, 9))
# Create a blank list for counting the au
count_au = np.zeros(9)
# Split the action list
for idx, unit in enumerate(data):
# Find only the digit
au_list = re.findall(r"\d+", unit)
# Filter the AU_CODE
au_list = list(filter(lambda au: int(au) in AU_CODE, au_list))
for i in range(len(au_list)):
first_code = AU_DICT[int(au_list[i])]
for j in range(i + 1, len(au_list)):
second_code = AU_DICT[int(au_list[j])]
count_matrix[first_code, second_code] += 1
count_matrix[second_code, first_code] += 1
# Count the total appear times
count_au[first_code] += 1
# Replace 0 in count_au to 1
count_au = np.where(count_au == 0.0, 1, count_au)
# Compute the adjancent matrix
adj_matrix = count_matrix / count_au.reshape(-1, 1)
# Show the information
print("AU appers:\n", count_au)
if args["save_img"]:
plt.matshow(adj_matrix, cmap="summer")
for (i, j), z in np.ndenumerate(adj_matrix):
plt.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
plt.savefig(args["jpg_name"], format="svg", dpi=1200)
np.savez(args["npz_name"],
adj_matrix=adj_matrix)
def save_LOSO_adj(args):
data = pd.read_csv(args.csv_name)
train_list, _ = LOSO_sequence_generate(data, "Subject")
    os.makedirs(args.npz_place, exist_ok=True)
    if args.save_img and args.image_place:
        os.makedirs(args.image_place, exist_ok=True)
for idx, train_info in enumerate(train_list):
evaluate_adj(df=train_info,
args={
"npz_name": f"{args.npz_place}/{idx}.npz",
"jpg_name": f"{args.image_place}/{idx}.svg",
"save_img": args.save_img
})
if __name__ == "__main__":
# Argument parse
parser = argparse.ArgumentParser()
parser.add_argument("--csv_name",
type=str,
required=True,
help="Filename")
parser.add_argument("--npz_place",
type=str,
required=True,
help="The root place for saving npz files")
parser.add_argument("--save_img",
action="store_true",
default=False)
parser.add_argument("--image_place",
type=str,
default=None,
help="The root place for saving images")
args = parser.parse_args()
save_LOSO_adj(args)
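# Example invocation (script and file names are hypothetical):
#   python adj_matrix.py --csv_name data/annotations.csv --npz_place adj_npz \
#       --save_img --image_place adj_img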
|
nilq/baby-python
|
python
|
# Unit test _bayesian_search_skopt
# ==============================================================================
import pytest
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from skopt.space import Categorical, Real, Integer
from skopt.utils import use_named_args
from skopt import gp_minimize
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from skforecast.model_selection import backtesting_forecaster
from skforecast.model_selection.model_selection import _bayesian_search_skopt
from tqdm import tqdm
from functools import partialmethod
tqdm.__init__ = partialmethod(tqdm.__init__, disable=True) # hide progress bar
# Fixtures _backtesting_forecaster_refit Series (skforecast==0.4.2)
# np.random.seed(123)
# y = np.random.rand(50)
y = pd.Series(
np.array([0.69646919, 0.28613933, 0.22685145, 0.55131477, 0.71946897,
0.42310646, 0.9807642 , 0.68482974, 0.4809319 , 0.39211752,
0.34317802, 0.72904971, 0.43857224, 0.0596779 , 0.39804426,
0.73799541, 0.18249173, 0.17545176, 0.53155137, 0.53182759,
0.63440096, 0.84943179, 0.72445532, 0.61102351, 0.72244338,
0.32295891, 0.36178866, 0.22826323, 0.29371405, 0.63097612,
0.09210494, 0.43370117, 0.43086276, 0.4936851 , 0.42583029,
0.31226122, 0.42635131, 0.89338916, 0.94416002, 0.50183668,
0.62395295, 0.1156184 , 0.31728548, 0.41482621, 0.86630916,
0.25045537, 0.48303426, 0.98555979, 0.51948512, 0.61289453]))
def test_bayesian_search_skopt_exception_when_search_space_names_do_not_match():
    '''
    Test Exception is raised when a search_space key name does not match the
    Space object name from skopt.
    '''
forecaster = ForecasterAutoreg(
regressor = Ridge(random_state=123),
lags = 2 # Placeholder, the value will be overwritten
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
lags_grid = [2, 4]
search_space = {'not_alpha': Real(0.01, 1.0, "log-uniform", name='alpha')}
with pytest.raises(Exception):
_bayesian_search_skopt(
forecaster = forecaster,
y = y,
lags_grid = lags_grid,
search_space = search_space,
steps = steps,
metric = 'mean_absolute_error',
refit = True,
initial_train_size = len(y_train),
fixed_train_size = True,
n_trials = 10,
random_state = 123,
return_best = False,
verbose = False
)
def test_results_output_bayesian_search_skopt_ForecasterAutoreg_with_mocked():
'''
Test output of _bayesian_search_skopt in ForecasterAutoreg with mocked
(mocked done in Skforecast v0.4.3).
'''
forecaster = ForecasterAutoreg(
regressor = RandomForestRegressor(random_state=123),
lags = 2 # Placeholder, the value will be overwritten
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
lags_grid = [2, 4]
search_space = {'n_estimators': Integer(10, 20, "uniform", name='n_estimators'),
'max_depth': Real(1, 5, "log-uniform", name='max_depth'),
'max_features': Categorical(['auto', 'sqrt'], name='max_features')
}
results = _bayesian_search_skopt(
forecaster = forecaster,
y = y,
lags_grid = lags_grid,
search_space = search_space,
steps = steps,
metric = 'mean_absolute_error',
refit = True,
initial_train_size = len(y_train),
fixed_train_size = True,
n_trials = 10,
random_state = 123,
return_best = False,
verbose = False
)[0]
expected_results = pd.DataFrame({
'lags' :[[1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
'params':[{'n_estimators': 17, 'max_depth': 1.9929129312200498, 'max_features': 'sqrt'},
{'n_estimators': 17, 'max_depth': 2.2043340187845697, 'max_features': 'sqrt'},
{'n_estimators': 14, 'max_depth': 2.5420783112854197, 'max_features': 'auto'},
{'n_estimators': 14, 'max_depth': 2.7445792319355813, 'max_features': 'auto'},
{'n_estimators': 12, 'max_depth': 3.059236337842803, 'max_features': 'sqrt'},
{'n_estimators': 16, 'max_depth': 2.0310778111301357, 'max_features': 'auto'},
{'n_estimators': 17, 'max_depth': 1.9909655528496835, 'max_features': 'auto'},
{'n_estimators': 15, 'max_depth': 3.29188739864399, 'max_features': 'auto'},
{'n_estimators': 14, 'max_depth': 2.8683395097937403, 'max_features': 'auto'},
{'n_estimators': 12, 'max_depth': 4.904323050812992, 'max_features': 'sqrt'},
{'n_estimators': 17, 'max_depth': 1.9929129312200498, 'max_features': 'sqrt'},
{'n_estimators': 17, 'max_depth': 2.2043340187845697, 'max_features': 'sqrt'},
{'n_estimators': 14, 'max_depth': 2.5420783112854197, 'max_features': 'auto'},
{'n_estimators': 14, 'max_depth': 2.7445792319355813, 'max_features': 'auto'},
{'n_estimators': 12, 'max_depth': 3.059236337842803, 'max_features': 'sqrt'},
{'n_estimators': 16, 'max_depth': 2.0310778111301357, 'max_features': 'auto'},
{'n_estimators': 17, 'max_depth': 1.9909655528496835, 'max_features': 'auto'},
{'n_estimators': 15, 'max_depth': 3.29188739864399, 'max_features': 'auto'},
{'n_estimators': 14, 'max_depth': 2.8683395097937403, 'max_features': 'auto'},
{'n_estimators': 12, 'max_depth': 4.904323050812992, 'max_features': 'sqrt'}],
'metric':np.array([0.21615799463348997, 0.21704325818847112, 0.227837004285555,
0.227837004285555, 0.22228329404011593, 0.22331462080401032,
0.22474421769386224, 0.21041138481130603, 0.227837004285555,
0.20541235571650687, 0.2198566352889403, 0.21368735246085513,
0.23578208465562722, 0.23578208465562722, 0.21856857925536957,
0.23649308193173593, 0.22528691869878895, 0.22001004752280182,
0.23578208465562722, 0.22606118006271245]),
'n_estimators' :np.array([17, 17, 14, 14, 12, 16, 17, 15, 14, 12, 17,
17, 14, 14, 12, 16, 17, 15, 14, 12]),
'max_depth' :np.array([1.9929129312200498, 2.2043340187845697, 2.5420783112854197,
2.7445792319355813, 3.059236337842803, 2.0310778111301357,
1.9909655528496835, 3.29188739864399, 2.8683395097937403,
4.904323050812992, 1.9929129312200498, 2.2043340187845697,
2.5420783112854197, 2.7445792319355813, 3.059236337842803,
2.0310778111301357, 1.9909655528496835, 3.29188739864399,
2.8683395097937403, 4.904323050812992]),
'max_features' :['sqrt', 'sqrt', 'auto', 'auto', 'sqrt', 'auto', 'auto',
'auto', 'auto', 'sqrt', 'sqrt', 'sqrt', 'auto', 'auto',
'sqrt', 'auto', 'auto', 'auto', 'auto', 'sqrt']
},
index=list(range(20))
).sort_values(by='metric', ascending=True)
pd.testing.assert_frame_equal(results, expected_results)
def test_results_output_bayesian_search_skopt_ForecasterAutoreg_with_mocked_when_kwargs_gp_minimize():
'''
Test output of _bayesian_search_skopt in ForecasterAutoreg when kwargs_gp_minimize with mocked
(mocked done in Skforecast v0.4.3).
'''
forecaster = ForecasterAutoreg(
regressor = Ridge(random_state=123),
lags = 2 # Placeholder, the value will be overwritten
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
lags_grid = [2, 4]
search_space = {'alpha': Real(0.01, 1.0, "log-uniform", name='alpha')}
# kwargs_gp_minimize
initial_point_generator = 'lhs'
kappa = 1.8
results = _bayesian_search_skopt(
forecaster = forecaster,
y = y,
lags_grid = lags_grid,
search_space = search_space,
steps = steps,
metric = 'mean_absolute_error',
refit = True,
initial_train_size = len(y_train),
fixed_train_size = True,
n_trials = 10,
random_state = 123,
return_best = False,
verbose = False,
kwargs_gp_minimize = {'initial_point_generator': initial_point_generator,
'kappa': kappa }
)[0]
expected_results = pd.DataFrame({
'lags' :[[1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
'params':[{'alpha': 0.016838723959617538}, {'alpha': 0.10033990027966379},
{'alpha': 0.30984231371002086}, {'alpha': 0.02523894961617201},
{'alpha': 0.06431449919265146}, {'alpha': 0.04428828255962529},
{'alpha': 0.7862467218336935}, {'alpha': 0.21382904045131165},
{'alpha': 0.4646709105348175}, {'alpha': 0.01124059864722814},
{'alpha': 0.016838723959617538}, {'alpha': 0.10033990027966379},
{'alpha': 0.30984231371002086}, {'alpha': 0.02523894961617201},
{'alpha': 0.06431449919265146}, {'alpha': 0.04428828255962529},
{'alpha': 0.7862467218336935}, {'alpha': 0.21382904045131165},
{'alpha': 0.4646709105348175}, {'alpha': 0.01124059864722814}],
'metric':np.array([0.21183497939493612, 0.2120677429498087, 0.2125445833833647,
0.21185973952472195, 0.21197085675244506, 0.21191472647731882,
0.2132707683116569, 0.21234254975249803, 0.21282383637032143,
0.21181829991953996, 0.21669632191054566, 0.21662944006267573,
0.21637019858109752, 0.2166911187311533, 0.2166621393072383,
0.21667792427267493, 0.21566880163156743, 0.21649975575675726,
0.21614409053015884, 0.21669956732317974]),
'alpha' :np.array([0.016838723959617538, 0.10033990027966379, 0.30984231371002086,
0.02523894961617201, 0.06431449919265146, 0.04428828255962529,
0.7862467218336935, 0.21382904045131165, 0.4646709105348175,
0.01124059864722814, 0.016838723959617538, 0.10033990027966379,
0.30984231371002086, 0.02523894961617201, 0.06431449919265146,
0.04428828255962529, 0.7862467218336935, 0.21382904045131165,
0.4646709105348175, 0.01124059864722814])
},
index=list(range(20))
).sort_values(by='metric', ascending=True)
pd.testing.assert_frame_equal(results, expected_results)
def test_results_output_bayesian_search_skopt_ForecasterAutoreg_with_mocked_when_lags_grid_is_None():
'''
Test output of _bayesian_search_skopt in ForecasterAutoreg when lags_grid is None with mocked
(mocked done in Skforecast v0.4.3), should use forecaster.lags as lags_grid.
'''
forecaster = ForecasterAutoreg(
regressor = Ridge(random_state=123),
lags = 4
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
lags_grid = None
search_space = {'alpha': Real(0.01, 1.0, "log-uniform", name='alpha')}
results = _bayesian_search_skopt(
forecaster = forecaster,
y = y,
lags_grid = lags_grid,
search_space = search_space,
steps = steps,
metric = 'mean_absolute_error',
refit = True,
initial_train_size = len(y_train),
fixed_train_size = True,
n_trials = 10,
random_state = 123,
return_best = False,
verbose = False
)[0]
expected_results = pd.DataFrame({
'lags' :[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
'params':[{'alpha': 0.26663099972129245}, {'alpha': 0.07193526575307788},
{'alpha': 0.24086278856848584}, {'alpha': 0.27434725570656354},
{'alpha': 0.0959926247515687}, {'alpha': 0.3631244766604131},
{'alpha': 0.06635119445083354}, {'alpha': 0.14434062917737708},
{'alpha': 0.019050287104581624}, {'alpha': 0.0633920962590419}],
'metric':np.array([0.21643005790510492, 0.21665565996188138, 0.2164646190462156,
0.21641953058020516, 0.21663365234334242, 0.2162939165190013,
0.21666043214039407, 0.21658325961136823, 0.21669499028423744,
0.21666290650172168]),
'alpha' :np.array([0.26663099972129245, 0.07193526575307788, 0.24086278856848584,
0.27434725570656354, 0.0959926247515687, 0.3631244766604131,
0.06635119445083354, 0.14434062917737708, 0.019050287104581624,
0.0633920962590419])
},
index=list(range(10))
).sort_values(by='metric', ascending=True)
pd.testing.assert_frame_equal(results, expected_results)
def test_results_output_bayesian_search_skopt_ForecasterAutoregCustom_with_mocked():
'''
Test output of _bayesian_search_skopt in ForecasterAutoregCustom with mocked
(mocked done in Skforecast v0.4.3).
'''
def create_predictors(y):
'''
Create first 4 lags of a time series, used in ForecasterAutoregCustom.
'''
lags = y[-1:-5:-1]
return lags
forecaster = ForecasterAutoregCustom(
regressor = Ridge(random_state=123),
fun_predictors = create_predictors,
window_size = 4
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
search_space = {'alpha': Real(0.01, 1.0, "log-uniform", name='alpha')}
results = _bayesian_search_skopt(
forecaster = forecaster,
y = y,
search_space = search_space,
steps = steps,
metric = 'mean_absolute_error',
refit = True,
initial_train_size = len(y_train),
fixed_train_size = True,
n_trials = 10,
random_state = 123,
return_best = False,
verbose = False
)[0]
expected_results = pd.DataFrame({
'lags' :['custom predictors', 'custom predictors', 'custom predictors',
'custom predictors', 'custom predictors', 'custom predictors',
'custom predictors', 'custom predictors', 'custom predictors',
'custom predictors'],
'params':[{'alpha': 0.26663099972129245}, {'alpha': 0.07193526575307788},
{'alpha': 0.24086278856848584}, {'alpha': 0.27434725570656354},
{'alpha': 0.0959926247515687}, {'alpha': 0.3631244766604131},
{'alpha': 0.06635119445083354}, {'alpha': 0.14434062917737708},
{'alpha': 0.019050287104581624}, {'alpha': 0.0633920962590419}],
'metric':np.array([0.21643005790510492, 0.21665565996188138, 0.2164646190462156,
0.21641953058020516, 0.21663365234334242, 0.2162939165190013,
0.21666043214039407, 0.21658325961136823, 0.21669499028423744,
0.21666290650172168]),
'alpha' :np.array([0.26663099972129245, 0.07193526575307788, 0.24086278856848584,
0.27434725570656354, 0.0959926247515687, 0.3631244766604131,
0.06635119445083354, 0.14434062917737708, 0.019050287104581624,
0.0633920962590419])
},
index=list(range(10))
).sort_values(by='metric', ascending=True)
pd.testing.assert_frame_equal(results, expected_results)
def test_evaluate_bayesian_search_skopt_when_return_best():
    '''
    Test forecaster is refitted when return_best=True in _bayesian_search_skopt.
    '''
forecaster = ForecasterAutoreg(
regressor = Ridge(random_state=123),
lags = 2 # Placeholder, the value will be overwritten
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
lags_grid = [2, 4]
search_space = {'alpha': Real(0.01, 1.0, "log-uniform", name='alpha')}
_bayesian_search_skopt(
forecaster = forecaster,
y = y,
lags_grid = lags_grid,
search_space = search_space,
steps = steps,
metric = 'mean_absolute_error',
refit = True,
initial_train_size = len(y_train),
fixed_train_size = True,
n_trials = 10,
random_state = 123,
return_best = True,
verbose = False
)
expected_lags = np.array([1, 2])
expected_alpha = 0.019050287104581624
assert (expected_lags == forecaster.lags).all()
assert expected_alpha == forecaster.regressor.alpha
def test_results_opt_best_output_bayesian_search_skopt_with_output_gp_minimize_skopt():
    '''
    Test that the results_opt_best output of _bayesian_search_skopt matches the
    output of skopt's gp_minimize().
    '''
forecaster = ForecasterAutoreg(
regressor = Ridge(random_state=123),
lags = 2
)
steps = 3
n_validation = 12
y_train = y[:-n_validation]
metric = 'mean_absolute_error'
initial_train_size = len(y_train)
fixed_train_size = True
refit = True
verbose = False
search_space = [Real(0.01, 1.0, "log-uniform", name='alpha')]
n_trials = 10
random_state = 123
@use_named_args(search_space)
def objective(
forecaster = forecaster,
y = y,
steps = steps,
metric = metric,
initial_train_size = initial_train_size,
fixed_train_size = fixed_train_size,
refit = refit,
verbose = verbose,
**params
) -> float:
forecaster.set_params(**params)
metric, _ = backtesting_forecaster(
forecaster = forecaster,
y = y,
steps = steps,
metric = metric,
initial_train_size = initial_train_size,
fixed_train_size = fixed_train_size,
refit = refit,
verbose = verbose
)
return abs(metric)
results_opt = gp_minimize(
func = objective,
dimensions = search_space,
n_calls = n_trials,
random_state = random_state
)
lags_grid = [4, 2]
search_space = {'alpha': Real(0.01, 1.0, "log-uniform", name='alpha')}
return_best = False
results_opt_best = _bayesian_search_skopt(
forecaster = forecaster,
y = y,
lags_grid = lags_grid,
search_space = search_space,
steps = steps,
metric = metric,
refit = refit,
initial_train_size = initial_train_size,
fixed_train_size = fixed_train_size,
n_trials = n_trials,
random_state = random_state,
return_best = return_best,
verbose = verbose,
kwargs_gp_minimize = {}
)[1]
assert results_opt.x == results_opt_best.x
assert results_opt.fun == results_opt_best.fun
|
nilq/baby-python
|
python
|
import os
import sys
import logging
import csv
from py.hookandline.HookandlineFpcDB_model import database, TideStations, Sites
class SiteManager:
def __init__(self, app=None, db=None):
super().__init__()
self._logger = logging.getLogger(__name__)
self._app = app
self._db = db
def import_sites(self):
"""
Method to import sites from a csv file and insert itnto the database
:return:
"""
app_dir = os.path.abspath(os.path.dirname(__file__))
        data_dir = os.path.normpath(os.path.join(app_dir, "..", "..", "data", "hookandline"))
sites_file = os.path.join(data_dir, "sites.csv")
if not os.path.isfile(sites_file):
return
        with open(sites_file, 'r') as f:
            reader = csv.reader(f)
            for i, row in enumerate(reader):
                if i == 0:
                    # Skip the header row
                    continue
                # Coordinates arrive as "degrees decimal-minutes" pairs
                lat_items = row[2].split(' ')
                lat = int(lat_items[0]) + float(lat_items[1]) / 60
                lon_items = row[3].split(' ')
                lon = int(lon_items[0]) + float(lon_items[1]) / 60
                try:
                    tide_station_id = TideStations.get(station_name=row[4]).tide_station
                    Sites.insert(name=row[0], is_active=row[1], latitude=lat, longitude=lon,
                                 tide_station=tide_station_id, area_description=row[5],
                                 is_cowcod_conservation_area=row[6]).execute()
                    print('{0} > {1} > {2}'.format(row[0], tide_station_id, row))
                except Exception as ex:
                    # Tide station lookup failed; insert the site without one
                    Sites.insert(name=row[0], is_active=row[1], latitude=lat, longitude=lon,
                                 area_description=row[5],
                                 is_cowcod_conservation_area=row[6]).execute()
                    print('{0} > {1}'.format(row[0], row))
if __name__ == '__main__':
sm = SiteManager()
# Import sites
sm.import_sites()
|
nilq/baby-python
|
python
|
#
# utilities.py
#
# (c) 2017 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# This module defines various internal utility functions for the library.
#
from lxml import etree as ET
import onem2mlib.constants as CON
import onem2mlib.utilities as UT
import onem2mlib.mcarequests
# define the namespace
_ns = {'m2m' : 'http://www.onem2m.org/xml/protocols'}
###############################################################################
#
# XML Utilities
#
def _searchExpression(elemName, relative):
if relative:
return './/'+elemName
return '//'+elemName
# Find a tag value (string) from the tree or, if not found, return the default.
# If relative is set to True then the search is done relatively to the provided
# element.
def getElement(tree, elemName, default=None, relative=False):
elem = tree.xpath(_searchExpression(elemName, relative), namespaces=_ns)
if elem and len(elem)>0 and elem[0].text:
result = elem[0].text
if isinstance(default, list):
result = result.split()
elif isinstance(default, bool): # bool must be checked before int!
result = bool(result)
elif isinstance(default, int):
result = int(result)
return result
return default
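# Example (illustrative): the default's type drives the conversion of the result.
#   root = stringToXML(b'<m2m:cnt xmlns:m2m="http://www.onem2m.org/xml/protocols"><ty>3</ty></m2m:cnt>')
#   getElement(root, 'ty', default=0)   # -> 3 (int, because the default is an int)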
# Find all subtree elements from the tree. Returns a list.
# If relative is set to True then the search is done relatively to the provided
# element.
def getElements(tree, elemName, relative=False):
return tree.xpath(_searchExpression(elemName, relative), namespaces=_ns)
# Find the children elements of a specific XML element.
def getElementWithChildren(tree, elemName):
    result = getElements(tree, elemName)
    if result:  # xpath returns a list; an empty list means nothing was found
        return result
    return None
# Find an attribute value from the tree/element or, if not found, return the default
def getAttribute(tree, elemName, attrName, default=None):
elem = tree.xpath('//'+elemName, namespaces=_ns)
if elem and len(elem)>0:
if attrName in elem[0].attrib:
return elem[0].attrib[attrName]
return default
# Create an XML element, including an optional namespace. Return the element
def createElement(elemName, namespace=None):
if namespace:
return ET.Element('{%s}%s' % (_ns['m2m'], elemName), nsmap=_ns)
else:
return ET.Element(elemName)
# Create and add an element with the given name to the root. Return the new element.
def addElement(root, name):
elem = createElement(name)
root.append(elem)
return elem
# Create and add an element with the given name to the root. Add content to it when
# the content is not None, or add the content nevertheless when mandatory is True.
def addToElement(root, name, content, mandatory=False):
if isinstance(content, int) or (content and len(content) > 0) or mandatory:
elem = createElement(name)
if isinstance(content, list):
elem.text = ' '.join(content)
else:
elem.text = str(content)
root.append(elem)
return elem
return None
# Create a new ElementTree from a sub-tree
def elementAsNewTree(tree):
return ET.ElementTree(tree).getroot()
# Create an XML structure out of a response
def responseToXML(response):
if response and response.content and len(response.content) > 0:
return stringToXML(response.content)
return None
# Return the qualified name of an element
def xmlQualifiedName(element, stripNameSpace=False):
qname = ET.QName(element)
if stripNameSpace:
return qname.localname
return qname
# Return the XML structure as a string
def xmlToString(xml):
return ET.tostring(xml)
# create a new XML structure from a string
def stringToXML(value):
return ET.fromstring(value)
###############################################################################
#
# JSON Utilities
#
# Find a tag value (string) from the JSON dictionary or, if not found, return the default.
def getElementJSON(jsn, elemName, default=None):
if elemName in jsn:
elem = jsn[elemName]
return elem
return default
# Add an element to the jsn content
def addToElementJSON(jsn, name, content, mandatory=False):
if isinstance(content, int) or (content and len(content) > 0) or mandatory:
jsn[name] = content
# Find all the sub-structures of a specific name inside a JSON document
# TODO: Replace this with some xpath-like query package
def getALLSubElementsJSON(jsn, name):
result = []
for elemName in jsn:
elem = jsn[elemName]
if elemName == name:
result.append(elem)
elif isinstance(elem, dict):
result.extend(getALLSubElementsJSON(elem, name))
elif isinstance(elem, list):
for e in elem:
if isinstance(e, dict):
result.extend(getALLSubElementsJSON(e, name))
return result
###############################################################################
#
# Utilities
#
# Get the type from a response, for JSON and XML
def getTypeFromResponse(response, encoding):
if encoding == CON.Encoding_XML:
root = responseToXML(response)
return toInt(getElement(root, 'ty'))
elif encoding == CON.Encoding_JSON:
jsn = response.json()
        # This is a bit complicated. We need to get to the type, which is hidden under an
        # unknown object definition key. So, we assume that the JSON we get has the object
        # definition in the first element (as it should be).
inner = list(jsn.values())[0]
return getElementJSON(inner, 'ty')
return -1
###############################################################################
#
# Formatting
#
_width = 45
def strResource(name, shortName, resource, minusIndent=0):
    if resource is None:
        return ''
    if isinstance(resource, list) and len(resource) == 0:
        return ''
    if not isinstance(resource, str):
        resource = str(resource)
    if resource and len(resource) > 0:
        if shortName:
            return ('\t%s(%s):' % (name, shortName)).ljust(_width-minusIndent) + str(resource) + '\n'
        else:
            return ('\t%s:' % (name)).ljust(_width-minusIndent) + str(resource) + '\n'
    return ''
# Convert to an integer, except when it is None, then return None.
def toInt(value):
if value is None:
return None
return int(value)
###############################################################################
#
# Search
#
# Find a sub-resource
def _findSubResource(resource, type):
    if not resource or not resource.session or not resource.resourceID:
        return None
    result = []
    ris = onem2mlib.mcarequests.discoverInCSE(resource, filter=[UT.newTypeFilterCriteria(int(type))], structuredResult=True)
    if ris:
        # The following is a hack to restrict the search result to the direct child
        # level. Yes, the oneM2M "level" attribute could be used for that, but it
        # doesn't seem to be supported that much (at least not in om2m).
        # Anyway, the hack works like that: count the forward slashes, ie. the
        # number of path elements, and only add those from the response to the result
        # which have count+1 path elements.
        sid = resource._structuredResourceID()
        count = sid.count('/') + 1
        for ri in ris:
            if ri.count('/') == count:  # <- the hack described above
                subResource = _newResourceFromRID(type, ri, resource)
                subResource.retrieveFromCSE()
                result.append(subResource)
        # Still a hack: sort the list by the ct attribute
        result.sort(key=lambda x: x.creationTime)
    return result
# Find a resource from a list by its resource name
def _findResourceInList(resources, resourceName):
if resources and len(resources)>0:
for res in resources:
if res.resourceName == resourceName:
return res
return None
# Create a new resource object with a given type, RI and parent
def _newResourceFromRID(type, ri, parent):
res = _newResourceFromType(type, parent)
if res:
res.resourceID = ri
return res
def _newResourceFromType(type, parent):
if type == CON.Type_ContentInstance: return onem2mlib.ContentInstance(parent, instantly=False)
elif type == CON.Type_Container: return onem2mlib.Container(parent, instantly=False)
elif type == CON.Type_AE: return onem2mlib.AE(parent, instantly=False)
elif type == CON.Type_Group: return onem2mlib.Group(parent, instantly=False)
elif type == CON.Type_ACP: return onem2mlib.AccessControlPolicy(parent, instantly=False)
elif type == CON.Type_Subscription: return onem2mlib.Subscription(parent, instantly=False)
elif type == CON.Type_RemoteCSE: return onem2mlib.RemoteCSE(parent, instantly=False)
return None
def _newResourceFromTypeString(typeString, parent):
if typeString == 'cin': return _newResourceFromType(CON.Type_ContentInstance, parent)
elif typeString == 'cnt': return _newResourceFromType(CON.Type_Container, parent)
elif typeString == 'ae': return _newResourceFromType(CON.Type_AE, parent)
elif typeString == 'grp': return _newResourceFromType(CON.Type_Group, parent)
elif typeString == 'acp': return _newResourceFromType(CON.Type_ACP, parent)
elif typeString == 'sub': return _newResourceFromType(CON.Type_Subscription, parent)
elif typeString == 'csr': return _newResourceFromType(CON.Type_RemoteCSE, parent)
return None
# Get a resource from the CSE by its resourceName
def _getResourceFromCSEByResourceName(type, rn, parent):
res = None
if type == CON.Type_ContentInstance: res = onem2mlib.ContentInstance(parent, resourceName=rn, instantly=False)
elif type == CON.Type_Container: res = onem2mlib.Container(parent, resourceName=rn, instantly=False)
elif type == CON.Type_AE: res = onem2mlib.AE(parent, resourceName=rn, instantly=False)
elif type == CON.Type_Group: res = onem2mlib.Group(parent, resourceName=rn, instantly=False)
elif type == CON.Type_ACP: res = onem2mlib.AccessControlPolicy(parent, resourceName=rn, instantly=False)
elif type == CON.Type_Subscription: res = onem2mlib.Subscription(parent, resourceName=rn, instantly=False)
elif type == CON.Type_RemoteCSE: res = onem2mlib.RemoteCSE(parent, resourceName=rn, instantly=False)
if res is not None and res.retrieveFromCSE():
return res
return None
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networking/v1alpha3/workload_entry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='networking/v1alpha3/workload_entry.proto',
package='istio.networking.v1alpha3',
syntax='proto3',
serialized_options=_b('Z istio.io/api/networking/v1alpha3'),
serialized_pb=_b('\n(networking/v1alpha3/workload_entry.proto\x12\x19istio.networking.v1alpha3\x1a\x1fgoogle/api/field_behavior.proto\"\xd8\x02\n\rWorkloadEntry\x12\x14\n\x07\x61\x64\x64ress\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x42\n\x05ports\x18\x02 \x03(\x0b\x32\x33.istio.networking.v1alpha3.WorkloadEntry.PortsEntry\x12\x44\n\x06labels\x18\x03 \x03(\x0b\x32\x34.istio.networking.v1alpha3.WorkloadEntry.LabelsEntry\x12\x0f\n\x07network\x18\x04 \x01(\t\x12\x10\n\x08locality\x18\x05 \x01(\t\x12\x0e\n\x06weight\x18\x06 \x01(\r\x12\x17\n\x0fservice_account\x18\x07 \x01(\t\x1a,\n\nPortsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\"Z istio.io/api/networking/v1alpha3b\x06proto3')
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,])
_WORKLOADENTRY_PORTSENTRY = _descriptor.Descriptor(
name='PortsEntry',
full_name='istio.networking.v1alpha3.WorkloadEntry.PortsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.WorkloadEntry.PortsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.WorkloadEntry.PortsEntry.value', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=358,
serialized_end=402,
)
_WORKLOADENTRY_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='istio.networking.v1alpha3.WorkloadEntry.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.networking.v1alpha3.WorkloadEntry.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.networking.v1alpha3.WorkloadEntry.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=404,
serialized_end=449,
)
_WORKLOADENTRY = _descriptor.Descriptor(
name='WorkloadEntry',
full_name='istio.networking.v1alpha3.WorkloadEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='istio.networking.v1alpha3.WorkloadEntry.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ports', full_name='istio.networking.v1alpha3.WorkloadEntry.ports', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='istio.networking.v1alpha3.WorkloadEntry.labels', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network', full_name='istio.networking.v1alpha3.WorkloadEntry.network', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locality', full_name='istio.networking.v1alpha3.WorkloadEntry.locality', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='istio.networking.v1alpha3.WorkloadEntry.weight', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_account', full_name='istio.networking.v1alpha3.WorkloadEntry.service_account', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_WORKLOADENTRY_PORTSENTRY, _WORKLOADENTRY_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=449,
)
_WORKLOADENTRY_PORTSENTRY.containing_type = _WORKLOADENTRY
_WORKLOADENTRY_LABELSENTRY.containing_type = _WORKLOADENTRY
_WORKLOADENTRY.fields_by_name['ports'].message_type = _WORKLOADENTRY_PORTSENTRY
_WORKLOADENTRY.fields_by_name['labels'].message_type = _WORKLOADENTRY_LABELSENTRY
DESCRIPTOR.message_types_by_name['WorkloadEntry'] = _WORKLOADENTRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WorkloadEntry = _reflection.GeneratedProtocolMessageType('WorkloadEntry', (_message.Message,), {
'PortsEntry' : _reflection.GeneratedProtocolMessageType('PortsEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKLOADENTRY_PORTSENTRY,
'__module__' : 'networking.v1alpha3.workload_entry_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.WorkloadEntry.PortsEntry)
})
,
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKLOADENTRY_LABELSENTRY,
'__module__' : 'networking.v1alpha3.workload_entry_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.WorkloadEntry.LabelsEntry)
})
,
'DESCRIPTOR' : _WORKLOADENTRY,
'__module__' : 'networking.v1alpha3.workload_entry_pb2'
# @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.WorkloadEntry)
})
_sym_db.RegisterMessage(WorkloadEntry)
_sym_db.RegisterMessage(WorkloadEntry.PortsEntry)
_sym_db.RegisterMessage(WorkloadEntry.LabelsEntry)
DESCRIPTOR._options = None
_WORKLOADENTRY_PORTSENTRY._options = None
_WORKLOADENTRY_LABELSENTRY._options = None
_WORKLOADENTRY.fields_by_name['address']._options = None
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
from ..query.grammars import SQLiteGrammar
from .BaseConnection import BaseConnection
from ..schema.platforms import SQLitePlatform
from ..query.processors import SQLitePostProcessor
from ..exceptions import DriverNotFound, QueryException
class SQLiteConnection(BaseConnection):
"""SQLite Connection class."""
name = "sqlite"
_connection = None
def __init__(
self,
host=None,
database=None,
user=None,
port=None,
password=None,
prefix=None,
full_details=None,
options=None,
name=None,
):
self.host = host
if port:
self.port = int(port)
else:
self.port = port
self.database = database
self.user = user
self.password = password
self.prefix = prefix
self.full_details = full_details or {}
self.options = options or {}
self._cursor = None
self.transaction_level = 0
self.open = 0
if name:
self.name = name
def make_connection(self):
"""This sets the connection on the connection class"""
try:
import sqlite3
except ModuleNotFoundError:
raise DriverNotFound(
"You must have the 'sqlite3' package installed to make a connection to SQLite."
)
if self.has_global_connection():
return self.get_global_connection()
self._connection = sqlite3.connect(self.database, isolation_level=None)
self._connection.row_factory = sqlite3.Row
self.open = 1
return self
@classmethod
def get_default_query_grammar(cls):
return SQLiteGrammar
@classmethod
def get_default_platform(cls):
return SQLitePlatform
@classmethod
def get_default_post_processor(cls):
return SQLitePostProcessor
def get_database_name(self):
return self.database
def reconnect(self):
pass
    def commit(self):
        """Commit the current transaction; close the connection when the
        outermost transaction level is reached."""
        if self.get_transaction_level() == 1:
            self._connection.commit()
            self._connection.isolation_level = None
            self._connection.close()
            self.open = 0
        self.transaction_level -= 1
        return self
def begin(self):
"""Sqlite Transaction"""
self._connection.isolation_level = "DEFERRED"
self.transaction_level += 1
return self
    def rollback(self):
        """Roll back the current transaction; close the connection when the
        outermost transaction level is reached."""
        if self.get_transaction_level() == 1:
            self._connection.rollback()
            self._connection.close()
            self.open = 0
        self.transaction_level -= 1
        return self
def get_cursor(self):
return self._cursor
def get_transaction_level(self):
return self.transaction_level
def query(self, query, bindings=(), results="*"):
"""Make the actual query that will reach the database and come back with a result.
Arguments:
query {string} -- A string query. This could be a qmarked string or a regular query.
bindings {tuple} -- A tuple of bindings
Keyword Arguments:
            results {str|1} -- If results equals 1, only the first record is returned;
                otherwise (e.g. "*") all records are returned. (default: {"*"})
Returns:
dict|None -- Returns a dictionary of results or None
"""
if not self.open:
self.make_connection()
        try:
            self._cursor = self._connection.cursor()
            if isinstance(query, list):
                # Execute each statement of a multi-statement query in turn
                for single_query in query:
                    self.statement(single_query)
            else:
                query = query.replace("'?'", "?")
                self.statement(query, bindings)
                if results == 1:
                    result = [dict(row) for row in self._cursor.fetchall()]
                    if result:
                        return result[0]
                else:
                    return [dict(row) for row in self._cursor.fetchall()]
except Exception as e:
raise QueryException(str(e)) from e
finally:
if self.get_transaction_level() <= 0:
self._connection.close()
self.open = 0
def format_cursor_results(self, cursor_result):
return [dict(row) for row in cursor_result]
    def select_many(self, query, bindings, amount):
        """Yield results in chunks of `amount` rows."""
        if not self.open:
            self.make_connection()
        self._cursor = self._connection.cursor()
        self.statement(query, bindings)
        result = self.format_cursor_results(self._cursor.fetchmany(amount))
        while result:
            yield result
            result = self.format_cursor_results(self._cursor.fetchmany(amount))
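# Minimal usage sketch (database filename and table are illustrative):
#   conn = SQLiteConnection(database="app.db").make_connection()
#   rows = conn.query("SELECT * FROM users WHERE id = ?", (1,))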
|
nilq/baby-python
|
python
|
########## Script 1 ###################
import sys
from RK_IO_model import RK_IO_methods
from Generalized_RK_Framework import generalized_RK_framework
import pdb #for debugging
import numpy as np
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
from pyomo.opt import SolverStatus, TerminationCondition
import pyomo.mpec as pyompec #for the complementarity
import math
from scipy.io import savemat, loadmat
import pandas
import time
import matplotlib.pyplot as plt
import pickle
import networkx as nx
################### Step 1: Generating Data ####################
######################### nxn Grid ####################################
nxn_grid = nx.generators.lattice.grid_2d_graph(int(sys.argv[1]),int(sys.argv[1]))
incidence_matrix = nx.linalg.graphmatrix.incidence_matrix(nxn_grid)
incid_mat = incidence_matrix.todense()
(num_nodes,num_arcs) = np.shape(incid_mat)
for i in range(0,num_arcs):
ph = incid_mat[:,i]
#for j in range(0,num_nodes):
j = 0
while ph[j] != 1:
j = j + 1
incid_mat[j,i] = -1
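# Each undirected edge becomes two opposed directed arcs, hence num_arcs*2 below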
full_incidence_matrix = np.concatenate((incid_mat,-1*incid_mat),1)
###################################################################################
################### Step 2: Setting up Object and Saving Matlab #############################
name_of_grid = str(sys.argv[1])+"x"+str(sys.argv[1])+"_Grid"
GRKF_Object = generalized_RK_framework(num_nodes=num_nodes,num_arcs=num_arcs*2,num_players=int(sys.argv[3]),num_trials=10,\
node_arc_incidence_matrix=full_incidence_matrix,\
name_of_graph=name_of_grid)
alpha_flag = int(sys.argv[2])
if alpha_flag == 1:
alpha = float(sys.argv[3])*0.5
elif alpha_flag == 2:
alpha = float(sys.argv[3])
GRKF_Object.saving_for_matlab_files_randomized_costs(lowerbound_c=1,upperbound_c=5,\
lowerbound_chat=5,upperbound_chat=20,\
alpha=alpha,if_different_costs=1)
################### Step 3: Saving the Object #################################
#https://www.datacamp.com/community/tutorials/pickle-python-tutorial
name_of_file = "class_object_1"
test = open(name_of_file,'wb')
pickle.dump(GRKF_Object,test)
test.close()
#https://www.mathworks.com/matlabcentral/answers/327116-run-function-from-command-line
#https://www.mathworks.com/matlabcentral/answers/410079-how-can-i-execute-an-m-file-from-windows-command-line-without-opening-matlab-s-command-window
#https://www.mathworks.com/matlabcentral/answers/479672-how-to-run-batch-file-in-matlab
#^The site that helped with the MATLAB command line code
|
nilq/baby-python
|
python
|
str2slice = "Just do it!"
print(str2slice[10]) # prints "!"
print(str2slice[5:7]) # prints "do"
print(str2slice[8:]) # prints "it!"
print(str2slice[:4]) # prints "Just"
print("Don't " + str2slice[5:]) # prints "Don't do it!"
|
nilq/baby-python
|
python
|
import abc
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
from optuna.distributions import BaseDistribution
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
class BaseSampler(object, metaclass=abc.ABCMeta):
"""Base class for samplers.
Optuna combines two types of sampling strategies, which are called *relative sampling* and
*independent sampling*.
*The relative sampling* determines values of multiple parameters simultaneously so that
sampling algorithms can use relationship between parameters (e.g., correlation).
Target parameters of the relative sampling are described in a relative search space, which
is determined by :func:`~optuna.samplers.BaseSampler.infer_relative_search_space`.
*The independent sampling* determines a value of a single parameter without considering any
relationship between parameters. Target parameters of the independent sampling are the
parameters not described in the relative search space.
More specifically, parameters are sampled by the following procedure.
At the beginning of a trial, :meth:`~optuna.samplers.BaseSampler.infer_relative_search_space`
is called to determine the relative search space for the trial. Then,
:meth:`~optuna.samplers.BaseSampler.sample_relative` is invoked to sample parameters
from the relative search space. During the execution of the objective function,
:meth:`~optuna.samplers.BaseSampler.sample_independent` is used to sample
parameters that don't belong to the relative search space.
The following figure depicts the lifetime of a trial and how the above three methods are
called in the trial.
.. image:: ../../../image/sampling-sequence.png
|
"""
def __str__(self) -> str:
return self.__class__.__name__
@abc.abstractmethod
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> Dict[str, BaseDistribution]:
"""Infer the search space that will be used by relative sampling in the target trial.
This method is called right before :func:`~optuna.samplers.BaseSampler.sample_relative`
method, and the search space returned by this method is passed to it. The parameters not
contained in the search space will be sampled by using
:func:`~optuna.samplers.BaseSampler.sample_independent` method.
Args:
study:
Target study object.
trial:
Target trial object.
Take a copy before modifying this object.
Returns:
A dictionary containing the parameter names and parameter's distributions.
.. seealso::
Please refer to :func:`~optuna.samplers.intersection_search_space` as an
implementation of :func:`~optuna.samplers.BaseSampler.infer_relative_search_space`.
"""
raise NotImplementedError
@abc.abstractmethod
def sample_relative(
self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
) -> Dict[str, Any]:
"""Sample parameters in a given search space.
This method is called once at the beginning of each trial, i.e., right before the
evaluation of the objective function. This method is suitable for sampling algorithms
that use relationship between parameters such as Gaussian Process and CMA-ES.
.. note::
            The failed trials are ignored by any built-in samplers when they sample new
parameters. Thus, failed trials are regarded as deleted in the samplers'
perspective.
Args:
study:
Target study object.
trial:
Target trial object.
Take a copy before modifying this object.
search_space:
The search space returned by
:func:`~optuna.samplers.BaseSampler.infer_relative_search_space`.
Returns:
A dictionary containing the parameter names and the values.
"""
raise NotImplementedError
@abc.abstractmethod
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
"""Sample a parameter for a given distribution.
This method is called only for the parameters not contained in the search space returned
by :func:`~optuna.samplers.BaseSampler.sample_relative` method. This method is suitable
for sampling algorithms that do not use relationship between parameters such as random
sampling and TPE.
.. note::
            The failed trials are ignored by any built-in samplers when they sample new
parameters. Thus, failed trials are regarded as deleted in the samplers'
perspective.
Args:
study:
Target study object.
trial:
Target trial object.
Take a copy before modifying this object.
param_name:
Name of the sampled parameter.
param_distribution:
Distribution object that specifies a prior and/or scale of the sampling algorithm.
Returns:
A parameter value.
"""
raise NotImplementedError
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Optional[Sequence[float]],
) -> None:
"""Trial post-processing.
        This method is called after the objective function returns and right before the trial is
finished and its state is stored.
.. note::
Added in v2.4.0 as an experimental feature. The interface may change in newer versions
without prior notice. See https://github.com/optuna/optuna/releases/tag/v2.4.0.
Args:
study:
Target study object.
trial:
Target trial object.
Take a copy before modifying this object.
state:
Resulting trial state.
values:
Resulting trial values. Guaranteed to not be :obj:`None` if trial succeeded.
"""
pass
def reseed_rng(self) -> None:
"""Reseed sampler's random number generator.
This method is called by the :class:`~optuna.study.Study` instance if trials are executed
in parallel with the option ``n_jobs>1``. In that case, the sampler instance will be
replicated including the state of the random number generator, and they may suggest the
same values. To prevent this issue, this method assigns a different seed to each random
number generator.
"""
pass
def _raise_error_if_multi_objective(self, study: Study) -> None:
if study._is_multi_objective():
raise ValueError(
"If the study is being used for multi-objective optimization, "
f"{self.__class__.__name__} cannot be used."
)
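# Illustrative sketch (not part of optuna): a minimal concrete BaseSampler that
# performs independent sampling only, showing how the three abstract methods fit
# together. It assumes distributions exposing `low`/`high` bounds (e.g. uniform
# distributions); a real sampler must handle every distribution type.
import random

class MinimalRandomSampler(BaseSampler):
    def infer_relative_search_space(
        self, study: Study, trial: FrozenTrial
    ) -> Dict[str, BaseDistribution]:
        # Empty relative search space: every parameter falls through to
        # sample_independent().
        return {}

    def sample_relative(
        self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
    ) -> Dict[str, Any]:
        return {}

    def sample_independent(
        self,
        study: Study,
        trial: FrozenTrial,
        param_name: str,
        param_distribution: BaseDistribution,
    ) -> Any:
        # Draw uniformly between the distribution bounds (assumed attributes).
        return random.uniform(param_distribution.low, param_distribution.high)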
|
nilq/baby-python
|
python
|
from ledfx.effects.temporal import TemporalEffect
from ledfx.effects.gradient import GradientEffect
#from ledfx.color import COLORS, GRADIENTS
#from ledfx.effects import Effect
import voluptuous as vol
import numpy as np
import logging
class FadeEffect(TemporalEffect, GradientEffect):
"""
Fades through the colours of a gradient
"""
NAME = "Fade"
CONFIG_SCHEMA = vol.Schema({
vol.Optional('gradient_method', description='Function used to generate gradient', default = 'bezier'): vol.In(["cubic_ease", "bezier"]),
})
def config_updated(self, config):
self.location = 1
self.forward = True
    def effect_loop(self):
        # Sweep the gradient position back and forth between 0 and 500
        if self.location in (0, 500):
            self.forward = not self.forward
        if self.forward:
            self.location += 1
        else:
            self.location -= 1
        # Map the position onto the gradient and paint every pixel that colour
        color = self.get_gradient_color(self.location/500.0)
        self.pixels = np.tile(color, (self.pixel_count, 1))
|
nilq/baby-python
|
python
|
import numpy as np
from hand import Hand
from mulliganTester import MulliganTester
class BurnMullTester(MulliganTester):
hand_types = ["twolandCreature","goodhand","keepable"]
hand = Hand("decklists/burn.txt")
output_file_header = "burn"
land_value_list = ["Mountain", "Bloodstained Mire", "Inspiring Vantage", "Sacred Foundry", "Scalding Tarn", "Wooded Foothills"]
def __init__(self):
MulliganTester.__init__(self)
self.rwSources = ["Bloodstained Mire", "Inspiring Vantage", "Sacred Foundry", "Scalding Tarn", "Wooded Foothills"]
self.mountain = ["Mountain"]
self.oneDropC = ["Goblin Guide", "Grim Lavamancer", "Monastery Swiftspear"]
def CheckHand(self):
numRW = self.hand.count_of(self.rwSources)
numLands = numRW + self.hand.count_of(self.mountain)
numEarlyThreat = self.hand.count_of(self.oneDropC)
twolandCreature = False
goodhand = False
keepable = False
numSpells = self.hand.handsize() - numLands
if numLands > 1 and numSpells >= 5 and numRW >= 1 and numEarlyThreat >= 1:
twolandCreature = True
elif numLands > 1 and (numSpells >= 4 or (numSpells >= 3 and numEarlyThreat > 0)):
goodhand = True
elif numLands > 1 and numSpells >= 3:
keepable = True
elif numLands == 1 and numEarlyThreat > 1:
keepable = True
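        # Result order matches self.hand_types: twolandCreature, goodhand, keepable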
results = np.array([twolandCreature, goodhand, keepable])
return results
if __name__ == "__main__":
burnTester = BurnMullTester()
burnTester.run()
|
nilq/baby-python
|
python
|