import subprocess
import json
import os
import csv
import numpy as np
import pandas as pd
import pysam
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def get_orf(input_genome, output_genome, orf):
orf = int(orf)
record = SeqIO.read(input_genome, 'fasta')
record.seq = record.seq[orf:]
SeqIO.write(record, output_genome, 'fasta')
def backtranslate(input_nucleotide, input_protein, output_codon):
nucleotides = SeqIO.parse(input_nucleotide, 'fasta')
proteins = SeqIO.parse(input_protein, 'fasta')
codons = []
for protein_record, nucleotide_record in zip(proteins, nucleotides):
i = 0
codon_list = []
for character in protein_record.seq:
if character != '-':
codon = str(nucleotide_record.seq[3*i:3*i+3])
codon_list.append(codon)
i += 1
else:
codon_list.append('---')
codon_record = SeqRecord(
Seq(''.join(codon_list)),
id=protein_record.id,
description=protein_record.description
)
codons.append(codon_record)
SeqIO.write(codons, output_codon, 'fasta')
def select_simulated_gene(dataset, gene, output):
aligned_filename = "output/simulation/%s/aligned_%s_orf-%d_codon.fasta"
nucleotide_genome_filename = "output/simulation/%s/genome.fasta" % dataset
nucleotide_genome = SeqIO.read(nucleotide_genome_filename, 'fasta')
max_percent_identity = 0
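    # try all three reading-frame offsets and keep the codons from the frame with the highest identity to the reference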
for i in range(3):
non_gaps = 0
matches = 0
codon_list = []
records = SeqIO.parse(aligned_filename % (dataset, gene, i), 'fasta')
translated_genome = next(records)
reference = next(records)
genome_i = 0
for j in range(len(reference)):
if reference[j] != '-':
non_gaps += 1
codon = str(nucleotide_genome[3*genome_i+i:3*genome_i+i+3].seq)
codon_list.append(codon)
if reference[j] == translated_genome[j]:
matches += 1
if translated_genome[j] != '-':
genome_i += 1
percent_identity = matches/non_gaps
if percent_identity > max_percent_identity:
max_percent_identity = percent_identity
desired_codons = ''.join(codon_list)
record = SeqRecord(
Seq(desired_codons).ungap('-'),
id=nucleotide_genome.id,
description=gene
)
SeqIO.write(record, output, 'fasta')
def write_abayesqr_config(sam_filename, reference_filename, output):
config_string = ("""filename of reference sequence (FASTA) : %s
filname of the aligned reads (sam format) : %s
paired-end (1 = true, 0 = false) : 0
SNV_thres : 0.01
reconstruction_start : 1
reconstruction_stop: 1300
min_mapping_qual : 20
min_read_length : 50
max_insert_length : 250
characteristic zone name : test
seq_err (assumed sequencing error rate(%%)) : 0.1
MEC improvement threshold : 0.0395 """ % (reference_filename, sam_filename))
with open(output, 'w') as config_file:
config_file.write(config_string)
def parse_abayesqr_output(input_text, output_fasta):
with open(input_text) as input_file:
lines = input_file.readlines()
records = []
for i, line in enumerate(lines):
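        # aBayesQR output alternates: even-indexed lines carry the haplotype frequency, odd-indexed lines carry the sequence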
if i % 2 == 0:
freq = float(line.split()[-1])
number = int(i/2)+1
header = 'haplotype-%d_freq-%f' % (number, freq)
if i % 2 == 1:
seq = Seq(line.strip())
record = SeqRecord(seq, id=header, description='')
records.append(record)
SeqIO.write(records, output_fasta, 'fasta')
def pairwise_distance_csv(fasta_filename, csv_filename):
records = list(SeqIO.parse(fasta_filename, 'fasta'))
np_seqs = np.array(
[list(str(record.seq)) for record in records],
dtype='<U1'
)
first_records = []
second_records = []
distances = []
search_term = 'quasispecies'
for i in range(len(records)):
for j in range(len(records)):
if records[j].name[: len(search_term)] == search_term:
continue
first_records.append(records[i].id)
second_records.append(records[j].id)
distance = (np_seqs[i, :] != np_seqs[j, :]).sum()
distances.append(distance)
pd.DataFrame({
'first_record': first_records,
'second_record': second_records,
'distance': distances,
}).to_csv(csv_filename)
def add_subtype_information(input_csv, output_csv):
df = pd.read_csv(input_csv)
df['Subtype1'] = df['ID1'].apply(lambda row: row.split('.')[0])
df['Subtype2'] = df['ID2'].apply(lambda row: row.split('.')[0])
df.to_csv(output_csv, index=False)
def extract_truth(
input_fasta, reference_path, dataset, reference, output_path,
output_json_path
):
sequences = list(SeqIO.parse(input_fasta, "fasta"))
aligned_sequences = []
output_dir = os.path.join("output", "truth", dataset)
tmp_dir = os.path.join(
output_dir, "truth-%s-%s-temp" % (dataset, reference)
)
os.mkdir(tmp_dir)
for sequence in sequences:
sequence_path = os.path.join(tmp_dir, "ref.fasta")
alignment_path = os.path.join(tmp_dir, "aligned.fasta")
SeqIO.write(sequence, sequence_path, "fasta")
command = [
"water", "-asequence", sequence_path, "-bsequence",
reference_path, "-gapopen", "10.0", "-gapextend", ".5", "-aformat",
"fasta", "-outfile", alignment_path
]
subprocess.run(command)
aligned_sequence = list(SeqIO.parse(alignment_path, "fasta"))[0]
aligned_sequence.seq = aligned_sequence.seq.ungap('-')
aligned_sequences.append(aligned_sequence)
os.remove(sequence_path)
os.remove(alignment_path)
os.rmdir(tmp_dir)
sequence_length = min([len(record.seq) for record in aligned_sequences])
for record in aligned_sequences:
record.seq = record.seq[:sequence_length]
SeqIO.write(aligned_sequences, output_path, "fasta")
pairwise_distances = []
for i in range(len(aligned_sequences)):
first_sequence = aligned_sequences[i]
first_np = np.array(list(first_sequence.seq), dtype='<U1')
for j in range(i+1, len(aligned_sequences)):
second_sequence = aligned_sequences[j]
second_np = np.array(list(second_sequence.seq), dtype='<U1')
disagreement = int((first_np != second_np).sum())
pairwise_distances.append({
'sequenceA': first_sequence.name,
'sequenceB': second_sequence.name,
'disagreement': disagreement
})
with open(output_json_path, 'w') as json_file:
json.dump(pairwise_distances, json_file, indent=2)
def covarying_truth(
input_computed, input_actual, input_reference, output_json
):
reference = SeqIO.read(input_reference, 'fasta')
rl = len(reference.seq)
with open(input_computed) as input_file:
cvs = json.load(input_file)
with open(input_actual) as input_file:
true_cvs = json.load(input_file)
tp = []
fp = []
tn = []
fn = []
for i in range(rl):
if i in true_cvs and i in cvs:
tp.append(i)
elif i in true_cvs and i not in cvs:
fn.append(i)
elif i not in true_cvs and i in cvs:
fp.append(i)
elif i not in true_cvs and i not in cvs:
tn.append(i)
precision = len(tp)/(len(tp)+len(fp))
recall = len(tp)/(len(tp)+len(fn))
result = {
'true_positives': tp,
'true_negative': tn,
'false_positives': fp,
'false_negatives': fn,
'precision': precision,
'recall': recall
}
with open(output_json, 'w') as output_file:
json.dump(result, output_file, indent=2)
def restrict_fasta_to_cvs(input_fasta, input_cvs, output_fasta):
with open(input_cvs) as json_file:
cvs = json.load(json_file)
records = list(SeqIO.parse(input_fasta, 'fasta'))
for record in records:
record.seq = Seq(''.join([record.seq[site] for site in cvs]))
SeqIO.write(records, output_fasta, 'fasta')
def downsample_bam(input_bam_path, output_bam_path, downsample_amount):
downsample_percentage = 1 - int(downsample_amount) / 100
input_bam = pysam.AlignmentFile(input_bam_path, 'rb')
number_of_reads = input_bam.count()
downsample_number = np.ceil(downsample_percentage * number_of_reads) \
.astype(np.int)
np.random.seed(1)
downsample_indices = np.random.choice(
number_of_reads, downsample_number, replace=False
)
downsample_indices.sort()
downsample_index = 0
output_bam = pysam.AlignmentFile(
output_bam_path, 'wb', header=input_bam.header
)
for i, read in enumerate(input_bam.fetch()):
if i == downsample_indices[downsample_index]:
output_bam.write(read)
downsample_index += 1
if downsample_index == len(downsample_indices):
break
output_bam.close()
pysam.index(output_bam_path)
input_bam.close()
def pluck_record(input_fasta_path, output_fasta_path, record):
all_records = SeqIO.parse(input_fasta_path, 'fasta')
desired_record = SeqIO.to_dict(all_records)[record]
SeqIO.write(desired_record, output_fasta_path, 'fasta')
def single_mapping_dataset(bam_path, ref_path, output_path):
bam = pysam.AlignmentFile(bam_path)
ref = SeqIO.read(ref_path, 'fasta')
percent_identity = np.zeros(bam.mapped, dtype=np.float)
differences = np.zeros(bam.mapped, dtype=np.float)
number_of_aligned_pairs = np.zeros(bam.mapped, dtype=np.float)
for i, read in enumerate(bam.fetch()):
aligned_pairs = read.get_aligned_pairs(matches_only=True)
aligned_query = np.array([
read.query[pair[0]] for pair in aligned_pairs
], dtype='<U1')
aligned_reference = np.array([
ref[pair[1]] for pair in aligned_pairs
], dtype='<U1')
agreement = (aligned_query == aligned_reference).sum()
number_of_aligned_pairs[i] = len(aligned_pairs)
differences[i] = number_of_aligned_pairs[i] - agreement
percent_identity[i] = agreement/number_of_aligned_pairs[i]
quality = np.array([
read.mapping_quality for read in bam.fetch()
], dtype=np.int)
query_length = np.array([
read.query_length for read in bam.fetch()
], dtype=np.int)
result = pd.DataFrame({
'mapping_quality': quality,
'differences': differences,
'number_of_aligned_pairs': number_of_aligned_pairs,
'percent_identity': percent_identity,
'query_length': query_length
}, index=[read.query_name for read in bam.fetch()])
result.to_csv(output_path, index_label='read_id')
def full_fvm_mapping_dataset(dataset_paths, output_csv_path):
all_datasets = list(map(
lambda path: pd.read_csv(path, index_col='read_id'),
dataset_paths
))
for dataset_path, dataset in zip(dataset_paths, all_datasets):
dataset_name = dataset_path.split('/')[-2]
dataset['reference'] = dataset_name
pd.concat(all_datasets, axis=0, sort=False, ignore_index=True) \
.to_csv(output_csv_path)
def true_covarying_kmers(input_fasta, input_json, output_csv, k):
k = int(k)
records = np.array([
list(record.seq)
for record in SeqIO.parse(input_fasta, 'fasta')
], dtype='<U1')
data = {
**{'index_%d' % i: [] for i in range(k)},
**{'character_%d' % i: [] for i in range(k)}
}
with open(input_json) as json_file:
covarying_sites = np.array(json.load(json_file), dtype=np.int)
for i in range(len(covarying_sites) - k):
covarying_indices = covarying_sites[i:i+k]
covarying_kmers = set()
for row_index in range(records.shape[0]):
covarying_kmer = ''.join(records[row_index, covarying_indices])
covarying_kmers.add(covarying_kmer)
for covarying_kmer in list(covarying_kmers):
for i in range(k):
data['index_%d' % i].append(covarying_indices[i])
data['character_%d' % i].append(covarying_kmer[i])
    pd.DataFrame(data).to_csv(output_csv)
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 20:59:18 2019
@author: <NAME>
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
data = pd.read_csv('data/data.csv')
data.drop(['Unnamed: 0'], axis=1, inplace=True)
# 'match_id', 'team_id'
data.describe()
data.apply(lambda x:sum(x.isnull()))
data.apply(lambda x:len(x.unique()))
data['shot_id_number'] = data.index+1
data.fillna({'remaining_min':data['remaining_min'].mean(),
'power_of_shot':data['power_of_shot'].mean(),
'remaining_sec':data['remaining_sec'].mean(),
'distance_of_shot':data['distance_of_shot'].mean(),
'location_x':data['location_x'].mean(),
'location_y':data['location_y'].mean(),
'remaining_min.1':data['remaining_min.1'].mean(),
'power_of_shot.1':data['power_of_shot.1'].mean(),
'remaining_sec.1':data['remaining_sec.1'].mean(),
'distance_of_shot.1':data['distance_of_shot.1'].mean(),
'knockout_match.1':data['knockout_match.1'].mean()},inplace=True)
vars=['knockout_match','area_of_shot','shot_basics', 'range_of_shot', 'team_name',
'date_of_game', 'home/away', 'type_of_shot', 'type_of_combined_shot',
'lat/lng', 'game_season']
for var in vars:
data[var].fillna(method='ffill', inplace=True)
data['type_of_combined_shot'].fillna(method='bfill', inplace=True)
data['home_or_away'] = data['home/away'].apply(lambda x:
'AWA' if x[5:6] == '@' else 'HOM')
data['time_min.1'] = data['remaining_min.1'] + data['remaining_sec.1'].apply(lambda x:
x if x==0 else x/60)
times = [i for i in range(2, 131, 2)]
start_time = [i for i in range(0, 129, 2)]
def imputeTime(cols):
time = cols[0]
for i,time_i in enumerate(times):
if float(time)<=float(time_i):
return str(start_time[i])+'-'+str(time_i)
data['remaining_time'] = data[['time_min.1']].apply(imputeTime, axis=1).astype(str)
data.drop(['time_min.1','location_y', 'shot_basics', 'lat/lng','power_of_shot.1','distance_of_shot.1',
'knockout_match.1','distance_of_shot.1', 'range_of_shot', 'type_of_shot',
'match_event_id', 'team_name', 'team_id', 'match_id', 'date_of_game',
'home/away', 'remaining_min', 'remaining_min.1', 'remaining_sec',
'remaining_sec.1'],
axis=1,inplace=True)
data.apply(lambda x:sum(x.isnull()))
data.apply(lambda x:len(x.unique()))
# Create broad seasons of 4 years each
seasons = ['2000','2004', '2008','2012','2016']
start_seasons = ['1996','2001','2005', '2009', '2013']
def imputSeason(cols):
season=cols[0]
for i,year in enumerate(seasons):
if year>=season[:4]:
return start_seasons[i]+'-'+year[-2:]
data['game_season_broad'] = data[['game_season']].apply(imputSeason, axis=1).astype(str)
data.drop(['game_season'],axis=1, inplace=True)
# Label Encoding
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
vars = ['area_of_shot', 'home_or_away',
'type_of_combined_shot',
'game_season_broad', 'remaining_time']
for var in vars:
data[var]=le.fit_transform(data[var])
# Correlation table (matrix)
cor = data.corr( method='pearson')
# OneHotEncoding
data=pd.get_dummies(data, columns=vars)
# Save modified data
data.to_csv("modified_data.csv", index=False)
# Read dataset
data_all = pd.read_csv("modified_data.csv")
import os
import sqlite3
import pandas as pd
import datetime
import numpy as np
import wget
def updateModisDB(filenames, cacheDir):
if len(filenames) > 0:
db_fn = os.path.join(cacheDir, "modis_db.db")
fn = filenames[0].split(os.sep)[-1]
product = fn.split('.')[0]
years = []
doys = []
tiles = []
fns = []
for filename in filenames:
fn = filename.split(os.sep)[-1]
fns.append(filename)
years.append(fn.split('.')[1][1:5])
doys.append(fn.split('.')[1][5:9])
tiles.append(fn.split('.')[2])
if not os.path.exists(db_fn):
conn = sqlite3.connect(db_fn)
modis_dict = {"TILE": tiles, "YEAR": years, "DOY": doys, "filename": fns}
modis_df = pd.DataFrame.from_dict(modis_dict)
modis_df.to_sql("%s" % product, conn, if_exists="replace", index=False)
conn.close()
else:
conn = sqlite3.connect(db_fn)
orig_df = pd.read_sql_query("SELECT * from %s" % product, conn)
modis_dict = {"TILE": tiles, "YEAR": years, "DOY": doys, "filename": fns}
            modis_df = pd.DataFrame.from_dict(modis_dict)
# Copyright [2020] [Two Six Labs, LLC]
# Licensed under the Apache License, Version 2.0
from flask import current_app, render_template, Blueprint, request
import pandas as pd
from app_deploy_data.authentication import auth
from utility.constants import (
INDEX_COLUMN,
UPLOAD_ID,
DATA_SOURCE_TYPE,
MAIN_DATA_SOURCE,
TABLE_COLUMN_SEPARATOR,
NOTES,
USERNAME,
DATA_SOURCE,
CSVFILE,
)
from utility.exceptions import ValidationError
UPLOAD_HTML = "data_upload.html"
upload_blueprint = Blueprint("upload", __name__)
def validate_data_form(request_form, request_files):
"""
We want to validate a submitted form whether it comes from the HTML page or an API
request. This function checks the form values, not the content of the upload.
:param request_form: flask request.form object
:param request_files: flask request.files object
:return: required fields
"""
try:
notes = request_form[
NOTES
] # require this field to be here, but no other checks
username = request_form[USERNAME]
data_source_name = request_form[DATA_SOURCE]
# todo: validate file types in request_files
except KeyError as e:
raise ValidationError(e)
current_app.logger.info(f"POST {username} {data_source_name}")
return username, data_source_name, request_files
def validate_submission_content(csvfile, data_source_schema):
"""
This function validates the contents of an uplaoded file against the expected schema
Raises ValidationError if the file does not have the correct format/data types
:param csvfile: request.file from flask for the uploaded file
:param data_source_schema: sqlalchemy columns to use for validation
:return: pandas dataframe of the uploaded csv
"""
try:
filename = csvfile.filename
        df = pd.read_csv(csvfile, sep=",", comment="#")
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatability
import pandas as pd
import pytest
from eland.dataframe import DEFAULT_NUM_ROWS_DISPLAYED
from eland.tests.common import TestData, assert_pandas_eland_series_equal
class TestDataFrameRepr(TestData):
@classmethod
def setup_class(cls):
# conftest.py changes this default - restore to original setting
pd.set_option("display.max_rows", 60)
"""
to_string
"""
def test_simple_lat_lon(self):
"""
Note on nested object order - this can change when
note this could be a bug in ES...
PUT my_index/doc/1
{
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
"_source": {
"location": {
"lat": "50.033333",
"lon": "8.570556"
}
}
GET my_index/_search
{
"_source": "location"
}
"_source": {
"location": {
"lon": "8.570556",
"lat": "50.033333"
}
}
Hence we store the pandas df source json as 'lon', 'lat'
"""
pd_dest_location = self.pd_flights()["DestLocation"].head(1)
ed_dest_location = self.ed_flights()["DestLocation"].head(1)
assert_pandas_eland_series_equal(pd_dest_location, ed_dest_location)
def test_num_rows_to_string(self):
# check setup works
assert pd.get_option("display.max_rows") == 60
# Test eland.DataFrame.to_string vs pandas.DataFrame.to_string
# In pandas calling 'to_string' without max_rows set, will dump ALL rows
# Test n-1, n, n+1 for edge cases
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED - 1)
self.num_rows_to_string(DEFAULT_NUM_ROWS_DISPLAYED)
with pytest.warns(UserWarning):
# UserWarning displayed by eland here (compare to pandas with max_rows set)
self.num_rows_to_string(
DEFAULT_NUM_ROWS_DISPLAYED + 1, None, DEFAULT_NUM_ROWS_DISPLAYED
)
# Test for where max_rows lt or gt num_rows
self.num_rows_to_string(10, 5, 5)
self.num_rows_to_string(100, 200, 200)
def num_rows_to_string(self, rows, max_rows_eland=None, max_rows_pandas=None):
ed_flights = self.ed_flights()[["DestLocation", "OriginLocation"]]
pd_flights = self.pd_flights()[["DestLocation", "OriginLocation"]]
ed_head = ed_flights.head(rows)
pd_head = pd_flights.head(rows)
ed_head_str = ed_head.to_string(max_rows=max_rows_eland)
pd_head_str = pd_head.to_string(max_rows=max_rows_pandas)
# print("\n", ed_head_str)
# print("\n", pd_head_str)
assert pd_head_str == ed_head_str
def test_empty_dataframe_string(self):
ed_ecom = self.ed_ecommerce()
pd_ecom = self.pd_ecommerce()
ed_ecom_s = ed_ecom[ed_ecom["currency"] == "USD"].to_string()
pd_ecom_s = pd_ecom[pd_ecom["currency"] == "USD"].to_string()
assert ed_ecom_s == pd_ecom_s
"""
repr
"""
def test_num_rows_repr(self):
self.num_rows_repr(
pd.get_option("display.max_rows") - 1, pd.get_option("display.max_rows") - 1
)
self.num_rows_repr(
pd.get_option("display.max_rows"), pd.get_option("display.max_rows")
)
self.num_rows_repr(
            pd.get_option("display.max_rows") + 1, pd.get_option("display.max_rows")
        )
"""
This script contains experiment set ups for results in figure 1.
"""
import os
import pandas as pd
from experiment_Setup import Experiment_Setup
from agent_env import get_pi_env
from SVRG import *
if __name__ == '__main__':
NUM_RUNS = 10
# Random MDP
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1e-3, "sigma_omega": 1e-3,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":50, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1e-3, 'sigma_omega': 1e-3,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 50, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.1, "batch_svrg_increment_ratio": 1.05},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=50, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="rmdp", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="zero", num_data=5000)
results.extend(pi_env.run_policy_iteration())
    pd.DataFrame(results).to_pickle('./rmdp_results.pkl')
# Mountain Car
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1e-1, "sigma_omega": 1e-1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":20, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1e-1, 'sigma_omega': 1e-1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 20, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.2, "batch_svrg_increment_ratio": 1.1},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=20, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="mc", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="zero", num_data=5000)
results.extend(pi_env.run_policy_iteration())
    pd.DataFrame(results).to_pickle('./mc_results.pkl')
# Cart Pole
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1, "sigma_omega": 1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":50, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1, 'sigma_omega': 1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 50, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.1, "batch_svrg_increment_ratio": 1.05},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=50, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="cp", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="zero", num_data=5000)
results.extend(pi_env.run_policy_iteration())
    pd.DataFrame(results).to_pickle('./cp_results.pkl')
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
NbrOfNodes = 35
gain = [0.01,0.02,0.03,0.04]
#--------------------------------------------------------------------------
# File for oversizing 5%
#--------------------------------------------------------------------------
thefileso05 = ['stent_g01cn25.rsn','stent_g02cn25.rsn','stent_g03cn25.rsn','stent_g04cn25.rsn']
mesh1_data05 = {}
mesh0_data05 = {}
for file in thefileso05:
myfile = open(file, 'r')
file_aux = myfile.readlines()
    file_array = pd.Series(file_aux)
import pandas as pd
import numpy as np
class PreProcessing:
data = None
quarter_names = None
num_years = None
num_days = None
def __init__(self, name):
name= str(name)
self.get_data(name)
self.data['Normalized_Close'] = self.normalized_data_col(self.data)
self.data['Quarter'] = self.get_quarter_col(self.data)
self.num_days = 252
self.prices_by_year = self.get_prices_by_year()
self.quarter_length = int(self.num_days / 4)
def get_prices_by_year(self):
df = self.modify_first_year_data()
for i in range(1, len(self.num_years)):
df = pd.concat([df, pd.DataFrame(self.get_year_data(year=self.num_years[i], normalized=True))], axis=1)
df = df[:self.num_days]
quarter_col = []
num_days_in_quarter = self.num_days // 4
for j in range(0, len(self.quarter_names)):
quarter_col.extend([self.quarter_names[j]]*num_days_in_quarter)
quarter_col = pd.DataFrame(quarter_col)
df = pd.concat([df, quarter_col], axis=1)
df.columns = self.num_years + ['Quarter']
df.index.name = 'Day'
df = self.fill_nans_with_mean(df)
return df
def get_year_data(self, year, normalized=True):
year = int(year)
if year not in self.num_years:
raise ValueError('\n' +
'Input year: {} not in available years: {}'.format(year, self.num_years))
prices = (self.data.loc[self.data['Date'].dt.year == year])
if normalized:
return np.asarray(prices.loc[:, 'Normalized_Close'])
else:
return np.asarray(prices.loc[:, 'Adj Close'])
def get_adj_close_prices(self, start_year, end_year):
start_year,end_year = int(start_year), int(end_year)
if start_year < self.num_years[0] or end_year > self.num_years[-1]:
raise ValueError('\n' +
'Incorrect data! \n' +
'Max range available: {}-{}\n'.format(self.num_years[0], self.num_years[-1]) +
'Was: {}-{}'.format(start_year, end_year))
df = (self.data.loc[(self.data['Date'].dt.year >= start_year) & (self.data['Date'].dt.year <= end_year)])
df = df.loc[:, ['Date', 'Adj Close']]
return df
def get_data(self, file_name):
file_name = str(file_name)
self.data = pd.read_csv('Data/' + file_name + '.csv')
self.data = self.data.iloc[:, [0, 5]]
self.data = self.data.dropna()
        self.data.Date = pd.to_datetime(self.data.Date)
import sqlite3
import pandas as pd
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from bokeh.plotting import figure, ColumnDataSource, show
from bokeh.models import HoverTool
import numpy as np
import scipy as sp
from pylab import plot,show
from numpy import vstack,array
from numpy.random import rand
from scipy.cluster.vq import kmeans,vq
from mpl_toolkits.mplot3d import Axes3D
from bokeh.io import output_notebook
import matplotlib.pyplot as plt
import seaborn as sns
database = './data/database.sqlite'
conn = sqlite3.connect(database)
cur = conn.cursor()
query = "SELECT name FROM sqlite_master WHERE type='table';"
pd.read_sql(query, conn)
query = "SELECT * FROM Player;"
a = pd.read_sql(query, conn)
import logging
from pathlib import Path
import click
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer, KNNImputer
from sklearn.preprocessing import StandardScaler
def engineer_features(df: pd.DataFrame) -> pd.DataFrame:
"""Transform the data by imputing and creating new features that will be useful for
prediction.
Performs the following steps:
1. Converts ``Survived`` to 0/1 rather than ``True``/``False``
2. Creates an indicator for missing ``Cabin`` information
3. Extracts titles from ``Name``
4. Defines ``Relatives``, the sum of ``SibSp`` and ``Parch``
5. Bins ``Relatives``, ``SibSp`` and ``Parch``
6. Drops ``Name``, ``Ticket`` and ``Cabin``
7. Imputes ``Embarked`` using the most frequent embarkation port
8. Imputes ``Age`` and ``Fare`` based on 5-nearest neighbors
9. Scales ``Age`` and ``Fare``
10. Bins ``Age``
11. One-hot encodes categorical variables
Args:
df: Cleaned and combined data output by :func:`~titanic.data.clean.make_dataset`
Returns:
pd.DataFrame: The processed data
"""
# convert Survived to 0/1 rather than True/False
df['Survived'] = df['Survived'].astype(float)
# create indicator for missing cabin information
df['has_cabin'] = (~df['Cabin'].isnull()).astype(float)
# extract titles
titles = df['Name'].str.extract('^.*, (.*?)\\.', expand=False)
df['Title'] = titles.where(titles.isin(['Mr', 'Miss', 'Mrs', 'Master']), 'Other')
# create "relatives" feature, the sum of SibSp and Parch
df['Relatives'] = pd.cut(df['SibSp'] + df['Parch'], bins=[-1, 0, 3, np.Inf], labels=['0', '1-3', '4+'])
# bin SibSp and Parch
df['SibSp'] = pd.cut(df['SibSp'], bins=[-1, 0, 1, np.Inf], labels=['0', '1', '2+'])
df['Parch'] = pd.cut(df['Parch'], bins=[-1, 0, np.Inf], labels=['0', '1+'])
# drop unnecessary features
df.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
# impute Embarked using the most frequently appearing port
df['Embarked'] = SimpleImputer(strategy='most_frequent').fit_transform(df[['Embarked']])
# do one-hot encoding
df = pd.get_dummies(df, drop_first=True)
# impute age and fare using k nearest neighbors
impute_age = KNNImputer(n_neighbors=5, weights='distance')
df_num = df.select_dtypes(include='number')
df_num = pd.DataFrame(impute_age.fit_transform(df_num), index=df_num.index, columns=df_num.columns)
df[['Age', 'Fare']] = df_num[['Age', 'Fare']]
# bin age now that it has been imputed
df['AgeBin'] = pd.cut(df['Age'],
bins=[-1, 6, 14, 25, 45, np.Inf],
labels=['0-6', '7-14', '15-25', '26-45', '45+'])
# do one-hot encoding again to incorporate age bins
    df = pd.get_dummies(df, drop_first=True)
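# Hedged usage sketch (the CSV paths below are placeholders, not files defined in this
# project): load the cleaned, combined data produced upstream, derive the model-ready
# features with engineer_features, and persist the result.
def _demo_engineer_features(input_path: str = "data/interim/combined.csv",
                            output_path: str = "data/processed/features.csv") -> pd.DataFrame:
    raw = pd.read_csv(input_path)
    features = engineer_features(raw)
    features.to_csv(output_path, index=False)
    return features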
import sys
import numpy as np
import urllib
import os
import argparse
from sklearn.cross_validation import train_test_split
from astroML.plotting import setup_text_plots
import empiriciSN
from MatchingLensGalaxies_utilities import *
from astropy.io import fits
import GCRCatalogs
import pandas as pd
from GCR import GCRQuery
sys.path.append('/global/homes/b/brycek/DC2/sims_GCRCatSimInterface/workspace/sed_cache/')
from SedFitter import sed_from_galacticus_mags
from lsst.sims.photUtils import Sed, Bandpass, BandpassDict
def get_sl2s_data():
filename = os.path.join(os.environ['TWINKLES_DIR'], 'data',
'SonnenfeldEtal2013_Table3.csv')
z = np.array([])
z_err = np.array([])
v_disp = np.array([])
v_disp_err = np.array([])
r_eff = np.array([])
r_eff_err = np.array([])
log_m = np.array([])
log_m_err = np.array([])
infile = open(filename, 'r')
inlines = infile.readlines()
for line1 in inlines:
if line1[0] == '#': continue
line = line1.split(',')
#Params
z = np.append(z, float(line[1]))
v_disp = np.append(v_disp, float(line[2]))
r_eff = np.append(r_eff, float(line[3]))
log_m = np.append(log_m, float(line[4]))
#Errors
z_err = np.append(z_err, float(line[5]))
v_disp_err = np.append(v_disp_err, float(line[6]))
r_eff_err = np.append(r_eff_err, float(line[7]))
log_m_err = np.append(log_m_err, float(line[8]))
#Build final arrays
X = np.vstack([z, v_disp, r_eff, log_m]).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([z_err**2, v_disp_err**2,
r_eff_err**2, log_m_err**2]).T
return X, Xerr
#Write new conditioning function
def get_log_m(cond_indices, m_index, X, model_file, Xerr=None):
"""
Uses a subset of parameters in the given data to condition the
model and return a sample value for log(M/M_sun).
Parameters
----------
cond_indices: array_like
Array of indices indicating which parameters to use to
condition the model.
m_index: int
Index of log(M/M_sun) in the list of parameters that were used
to fit the model.
X: array_like, shape = (n < n_features,)
Input data.
Xerr: array_like, shape = (X.shape,) (optional)
Error on input data. If none, no error used to condition.
Returns
-------
log_m: float
Sample value of log(M/M_sun) taken from the conditioned model.
Notes
-----
The fit_params array specifies a list of indices to use to
condition the model. The model will be conditioned and then
a mass will be drawn from the conditioned model.
This is so that the mass can be used to find cosmoDC2 galaxies
to act as hosts for OM10 systems.
This does not make assumptions about what parameters are being
used in the model, but does assume that the model has been
fit already.
"""
if m_index in cond_indices:
raise ValueError("Cannot condition model on log(M/M_sun).")
cond_data = np.array([])
if Xerr is not None: cond_err = np.array([])
m_cond_idx = m_index
n_features = empiricist.XDGMM.mu.shape[1]
j = 0
for i in range(n_features):
if i in cond_indices:
cond_data = np.append(cond_data,X[j])
if Xerr is not None: cond_err = np.append(cond_err, Xerr[j])
j += 1
if i < m_index: m_cond_idx -= 1
else:
cond_data = np.append(cond_data,np.nan)
if Xerr is not None: cond_err = np.append(cond_err, 0.0)
if Xerr is not None:
cond_XDGMM = empiricist.XDGMM.condition(cond_data, cond_err)
else: cond_XDGMM = empiricist.XDGMM.condition(cond_data)
sample = cond_XDGMM.sample()
log_m = sample[0][m_cond_idx]
return log_m
def estimate_stellar_masses_om10():
# Instantiate an empiriciSN worker object:
empiricist = empiriciSN.Empiricist()
X, Xerr = get_sl2s_data()
# Load in cached om10 catalog
    filename = os.path.join(os.environ['TWINKLES_DIR'], 'data', 'om10_qso_mock.fits')
hdulist = fits.open(filename)
twinkles_lenses = hdulist[1].data
# Predict a mass for each galaxy:
np.random.seed(0)
cond_indices = np.array([0,1])
twinkles_log_m_1comp = np.array([])
model_file='demo_model.fit'
empiricist.fit_model(X, Xerr, filename = 'demo_model.fit', n_components=1)
twinkles_data = np.array([twinkles_lenses['ZLENS'], twinkles_lenses['VELDISP']]).T
for x in twinkles_data:
log_m = get_log_m(cond_indices, 2, x[cond_indices], model_file)
twinkles_log_m_1comp = np.append(twinkles_log_m_1comp,log_m)
return twinkles_lenses, log_m, twinkles_log_m_1comp
def get_catalog(catalog, twinkles_lenses, twinkles_log_m_1comp):
gcr_om10_match = []
err = 0
np.random.seed(10)
i = 0
z_cat_min = np.power(10, np.log10(np.min(twinkles_lenses['ZLENS'])) - .1)
z_cat_max = np.power(10, np.log10(np.max(twinkles_lenses['ZLENS'])) + .1)
stellar_mass_cat_min = np.min(np.power(10, twinkles_log_m_1comp))*0.9
stellar_mass_cat_max = np.max(np.power(10, twinkles_log_m_1comp))*1.1
data = catalog.get_quantities(['galaxy_id', 'redshift_true', 'stellar_mass', 'ellipticity_true', 'size_true', 'size_minor_true',
'stellar_mass_bulge', 'stellar_mass_disk', 'size_bulge_true', 'size_minor_bulge_true'],
filters=['stellar_mass > %f' % stellar_mass_cat_min, 'stellar_mass < %f' % stellar_mass_cat_max,
'redshift_true > %f' % z_cat_min, 'redshift_true < %f' % z_cat_max,
'stellar_mass_bulge/stellar_mass > 0.99'])
#### Important Note
# Twinkles issue #310 (https://github.com/LSSTDESC/Twinkles/issues/310) says OM10 defines ellipticity as 1 - b/a but
# gcr_catalogs defines ellipticity as (1-b/a)/(1+b/a) (https://github.com/LSSTDESC/gcr-catalogs/blob/master/GCRCatalogs/SCHEMA.md)
data['om10_ellipticity'] = (1-(data['size_minor_bulge_true']/data['size_bulge_true']))
data_df = pd.DataFrame(data)
data_df.to_csv('om10_matching_checkpoint_1.csv', index=False)
def match_to_cat(twinkles_lenses, twinkles_log_m_1comp, data_df):
    gcr_om10_match = []
    err = 0
    row_num = -1
    keep_rows = []
for zsrc, m_star, ellip in zip(twinkles_lenses['ZLENS'], np.power(10, twinkles_log_m_1comp), twinkles_lenses['ELLIP']):
row_num += 1
#print(zsrc, m_star, ellip)
if row_num % 1000 == 0:
print(row_num)
z_min, z_max = np.power(10, np.log10(zsrc) - .1), np.power(10, np.log10(zsrc) + .1)
m_star_min, m_star_max = m_star*.9, m_star*1.1
ellip_min, ellip_max = ellip*.9, ellip*1.1
data_subset = data_df.query('redshift_true > %f and redshift_true < %f and stellar_mass > %f and stellar_mass < %f and om10_ellipticity > %f and om10_ellipticity < %f' %
(z_min, z_max, m_star_min, m_star_max, ellip_min, ellip_max))
#data = catalog.get_quantities(['redshift_true', 'stellar_mass', 'ellipticity_true'])
#data_subset = (query).filter(data)
#print(data_subset)
num_matches = len(data_subset['redshift_true'])
if num_matches == 0:
err += 1
continue
elif num_matches == 1:
gcr_data = [data_subset['redshift_true'].values[0],
data_subset['stellar_mass_bulge'].values[0],
data_subset['om10_ellipticity'].values[0],
data_subset['size_bulge_true'].values[0],
data_subset['size_minor_bulge_true'].values[0],
data_subset['galaxy_id'].values[0]]
gcr_om10_match.append(gcr_data)
keep_rows.append(row_num)
elif num_matches > 1:
use_idx = np.random.choice(num_matches)
gcr_data = [data_subset['redshift_true'].values[use_idx],
data_subset['stellar_mass_bulge'].values[use_idx],
data_subset['om10_ellipticity'].values[use_idx],
data_subset['size_bulge_true'].values[use_idx],
data_subset['size_minor_bulge_true'].values[use_idx],
data_subset['galaxy_id'].values[use_idx]]
gcr_om10_match.append(gcr_data)
keep_rows.append(row_num)
print("Total Match Failures: ", err, " Percentage Match Failures: ", np.float(err)/len(twinkles_log_m_1comp))
np.savetxt('gcr_om10_match.dat', gcr_om10_match)
def get_catalog_mags(catalog):
H0 = catalog.cosmology.H0.value
Om0 = catalog.cosmology.Om0
sed_label = []
sed_min_wave = []
sed_wave_width = []
for quant_label in sorted(catalog.list_all_quantities()):
if (quant_label.startswith('sed') and quant_label.endswith('bulge')):
sed_label.append(quant_label)
label_split = quant_label.split('_')
sed_min_wave.append(int(label_split[1])/10)
sed_wave_width.append(int(label_split[2])/10)
bin_order = np.argsort(sed_min_wave)
sed_label = np.array(sed_label)[bin_order]
sed_min_wave = np.array(sed_min_wave)[bin_order]
sed_wave_width = np.array(sed_wave_width)[bin_order]
for i in zip(sed_label, sed_min_wave, sed_wave_width):
print(i)
columns = ['galaxy_id', 'redshift_true', 'mag_u_lsst', 'mag_g_lsst', 'mag_r_lsst',
'mag_i_lsst', 'mag_z_lsst', 'mag_y_lsst']
for sed_bin in sed_label:
columns.append(sed_bin)
data = catalog.get_quantities(columns,
filters=['stellar_mass > %f' % stellar_mass_cat_min, 'stellar_mass < %f' % stellar_mass_cat_max,
'redshift_true > %f' % z_cat_min, 'redshift_true < %f' % z_cat_max,
'stellar_mass_bulge/stellar_mass > 0.99'])
    data_df = pd.DataFrame(data)
"""
Description: functions for text preprocessing
Author: <NAME>. @ AI - Camp
Date: Spring 2022
"""
import sys
root = r"C:\Users\45323\Desktop\新python文件夹\AI_Camp\AICS_Bert"
sys.path.append(root)
import config
import pandas as pd
from aug_helper_func import load_dfs_from_folder, get_specific_label_dfs, add_primitive_data, augmentation_train_test_split
from text_preprocessing import clean_df, map_labels
from Label2Id import Label2Id
from transformers import BertTokenizer
from dataloader import EmailsDataset, get_label_distribution, dataloader
from transformer import BertUncased
import config
from train import training
if __name__ == '__main__':
# NOTE: Load Data
aug_dfs = load_dfs_from_folder(config.augmented_path, 'augmented_text', 'label')
    df = pd.read_csv(config.data_path)
from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import pandas as pd
import numpy as np
import re
import locale
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
# Readthedocs has a problem, but difficult to replicate
locale.setlocale(locale.LC_ALL, "")
from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType
if TYPE_CHECKING:
from ..schema import Schema
from ..models import DataSourceModel
class WranglingScript:
"""Get, review and restructure tabular data."""
def __init__(self):
self.check_source = CoreScript().check_source
self.core = CoreScript()
self.DATE_FORMATS = {
"date": {"fmt": ["%Y-%m-%d"], "txt": ["YYYY-MM-DD"]},
"datetime": {
"fmt": ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S %Z%z"],
"txt": ["YYYY-MM-DD hh:mm:ss", "YYYY-MM-DD hh:mm:ss UTC+0000"],
},
"year": {"fmt": ["%Y"], "txt": ["YYYY"]},
}
def get_dataframe(
self,
source: str,
preserve: Union[str, List[str]] = None,
filetype: MimeType = MimeType.CSV,
names: Optional[List[str]] = None,
nrows: Optional[int] = None,
) -> Union[Dict[str, pd.DataFrame], pd.DataFrame]:
"""Return a Pandas dataframe from a given source.
Accepts default pandas parameters for Excel and CSV, but the objective is to preserve the source data with
little data conversion outside of the data wrangling process. With this in mind, a
Parameters
----------
source: str
Source filename.
preserve: str or list of str, default None
Column names where variable type guessing must be prevented and the original data preserved.
Critical for foreign key references with weird formats, like integers with leading `0`.
filetype: MimeType, default MimeType.CSV
Pandas can read a diversity of filetypes, but whyqd has only been tested on `xls`, `xlsx` and `csv`.
names: list of str, default None
If the source data has no header row, explicitly pass a list of names - in the correct order - to address
the data.
nrows: int, default None
A specified number of rows to return. For review, it is faster to load only a small number.
Returns
-------
DataFrame or dict of DataFrame
"""
self.check_source(source)
# If the dtypes have not been set, then ensure that any provided preserved columns remain untouched
# i.e. no forcing of text to numbers
# defaulting to `dtype = object` ...
kwargs = {}
if preserve:
if not isinstance(preserve, list):
preserve = [preserve]
# kwargs["dtype"] = {k: object for k in preserve}
kwargs["dtype"] = {k: pd.StringDtype() for k in preserve}
if names:
kwargs["header"] = None
kwargs["names"] = names
if nrows:
kwargs["nrows"] = nrows
# Check filetype
if filetype in [MimeType.XLS, MimeType.XLSX]:
# This will default to returning a dictionary of dataframes for each sheet
kwargs["sheet_name"] = None
df = pd.read_excel(source, **kwargs)
keys = list(df.keys())
for k in keys:
if df[k].empty:
del df[k]
if len(df.keys()) == 1:
df = df[keys[0]]
if filetype == MimeType.CSV:
# New in pandas 1.3: will ignore encoding errors - perfect for this initial wrangling process
kwargs["encoding_errors"] = "ignore"
# Supposed to help with fruity separater guessing
kwargs["engine"] = "python"
if not nrows:
df = pd.read_csv(source, **kwargs)
else:
kwargs["iterator"] = True
kwargs["chunksize"] = 10000
df_iterator = pd.read_csv(source, **kwargs)
df = pd.concat(df_iterator, ignore_index=True)
return df
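    def preview_dataframe(self, source: str, preserve: Union[str, List[str]] = None) -> pd.DataFrame:
        # Illustrative convenience wrapper, not part of the original class: pull only
        # the first 50 rows of a source for a quick visual check, keeping id-like
        # columns as strings (via `preserve`) so values such as leading zeros survive.
        return self.get_dataframe(source=source, preserve=preserve, nrows=50)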
def get_dataframe_from_datasource(self, data: DataSourceModel) -> pd.DataFrame:
"""Return the dataframe for a data source.
Parameters
----------
data: DataSourceModel
Returns
-------
pd.DataFrame
"""
path = data.path
try:
self.core.check_source(path)
except FileNotFoundError:
path = str(self.directory / data.source)
self.core.check_source(path)
df_columns = [d.name for d in data.columns]
names = [d.name for d in data.names] if data.names else None
df = self.get_dataframe(
source=path,
filetype=data.mime,
names=names,
preserve=[d.name for d in data.preserve if d.name in df_columns],
)
if isinstance(df, dict):
if df:
df = df[data.sheet_name]
else:
# It's an empty df for some reason. Maybe excessive filtering.
df = pd.DataFrame()
if df.empty:
raise ValueError(
f"Data source contains no data ({data.path}). Review actions to see if any were more destructive than expected."
)
return df
    def get_dataframe_columns(self, df: pd.DataFrame) -> List[ColumnModel]:
"""Returns a list of ColumnModels from a source DataFrame.
Parameters
----------
df: pd.DataFrame
Should be derived from `get_dataframe` with a sensible default for `nrows` being 50.
Returns
-------
List of ColumnModel
"""
# Prepare summary
columns = [
{"name": k, "type": "number"}
if v in ["float64", "int64"]
else {"name": k, "type": "date"}
if v in ["datetime64[ns]"]
else {"name": k, "type": "string"}
for k, v in df.dtypes.apply(lambda x: x.name).to_dict().items()
]
return [ColumnModel(**c) for c in columns]
def deduplicate_columns(self, df: pd.DataFrame, schema: Type[Schema]) -> pd.Index:
"""
Source: https://stackoverflow.com/a/65254771/295606
Source: https://stackoverflow.com/a/55405151
Returns a new column list permitting deduplication of dataframes which may result from merge.
Parameters
----------
df: pd.DataFrame
fields: list of FieldModel
Destination Schema fields
Returns
-------
pd.Index
Updated column names
"""
column_index = pd.Series(df.columns.tolist())
if df.columns.has_duplicates:
duplicates = column_index[column_index.duplicated()].unique()
for name in duplicates:
dups = column_index == name
replacements = [f"{name}{i}" if i != 0 else name for i in range(dups.sum())]
column_index.loc[dups] = replacements
# Fix any fields with the same name as any of the target fields
# Do this to 'force' schema assignment
for name in [f.name for f in schema.get.fields]:
dups = column_index == name
replacements = [f"{name}{i}__dd" if i != 0 else f"{name}__dd" for i in range(dups.sum())]
column_index.loc[dups] = replacements
return pd.Index(column_index)
# def check_column_unique(self, source: str, key: str) -> bool:
# """
# Test a column in a dataframe to ensure all values are unique.
# Parameters
# ----------
# source: Source filename
# key: Column name of field where data are to be tested for uniqueness
# Raises
# ------
# ValueError if not unique
# Returns
# -------
# bool, True if unique
# """
# df = self.get_dataframe(source, key)
# if len(df[key]) != len(df[key].unique()):
# import warnings
# filename = source.split("/")[-1] # Obfuscate the path
# e = "'{}' contains non-unique rows in column `{}`".format(filename, key)
# # raise ValueError(e)
# warnings.warn(e)
# return True
# def check_date_format(self, date_type: str, date_value: str) -> bool:
# # https://stackoverflow.com/a/37045601
# # https://www.saltycrane.com/blog/2009/05/converting-time-zones-datetime-objects-python/
# for fmt in self.DATE_FORMATS[date_type]["fmt"]:
# try:
# if date_value == datetime.strptime(date_value, fmt).strftime(fmt):
# return True
# except ValueError:
# continue
# raise ValueError(f"Incorrect date format, should be: `{self.DATE_FORMATS[date_type]['txt']}`")
###################################################################################################
### Pandas type parsers
###################################################################################################
def parse_dates(self, x: Union[None, str]) -> Union[pd.NaT, date.isoformat]:
"""
This is the hard-won 'trust nobody', certainly not Americans, date parser.
TODO: Replace with https://github.com/scrapinghub/dateparser
The only concern is that dateparser.parse(x).date().isoformat() will coerce *any* string to a date,
no matter *what* it is.
"""
if pd.isnull(x):
return pd.NaT
# Check if to_datetime can handle things
        if not pd.isnull(pd.to_datetime(x, errors="coerce", dayfirst=True)):
import sys
import pandas as pd
import numpy as np
import catboost
DUR_RU = 'Длительность разговора с оператором, сек'
DUR_EN = 'oper_duration'
RU_COLS = [
'Время начала вызова', 'Время окончания вызова', 'Время постановки в очередь',
'Время переключения на оператора', 'Время окончания разговора с оператором',
]
EN_COLS = ['call_start_time', 'call_end_time', 'queue_time', 'oper_start_time', 'oper_end_time']
zero_time = pd.to_datetime('00:00:00')
SEC_PER_DAY = 60*60*24
NA_VALUE = -1.2345
def extract_features(data):
times = data[RU_COLS].apply(pd.to_datetime)
times.columns = EN_COLS
abs_times = times.apply(lambda x: (x - zero_time).dt.total_seconds()).fillna(NA_VALUE)
abs_times.columns = [c + '_abs' for c in EN_COLS]
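    # map absolute times onto the unit circle (sin/cos) so times just before and after midnight end up close in feature space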
day = abs_times / SEC_PER_DAY * 2 * np.pi
hour = (abs_times % (24 * 60)) / (24 * 60) * 2 * np.pi
minute = (abs_times % 60) / 60 * 2 * np.pi
day_sines = np.sin(day)
day_cosines = np.cos(day)
hour_sines = np.sin(hour)
hour_cosines = np.cos(hour)
minute_sines = np.sin(minute)
minute_cosines = np.cos(minute)
day_sines.columns = ['day_sin__' + c for c in EN_COLS]
day_cosines.columns = ['day_cos__' + c for c in EN_COLS]
hour_sines.columns = ['hour_sin__' + c for c in EN_COLS]
hour_cosines.columns = ['hour_cos__' + c for c in EN_COLS]
minute_sines.columns = ['minute_sin__' + c for c in EN_COLS]
minute_cosines.columns = ['minute_cos__' + c for c in EN_COLS]
null_times = times.isnull().astype(int)
null_times.columns = [c + "_miss" for c in EN_COLS]
    diffs = pd.DataFrame(index=times.index)
from methylcapsnet.samplers import ImbalancedDatasetSampler
from pymethylprocess.MethylationDataTypes import MethylationArray
import numpy as np, pandas as pd
from captum.attr import GradientShap
import torch
from torch.utils.data import DataLoader, Dataset, TensorDataset, Subset, ConcatDataset
from torch.utils.data.sampler import SubsetRandomSampler
from methylcapsnet.methylcaps_data_models import *
def return_spw_importances_(train_methyl_array,
val_methyl_array,
interest_col,
select_subtypes,
capsules_pickle,
include_last,
n_bins,
spw_config,
model_state_dict_pkl,
batch_size,
by_subtype=False
):
ma=MethylationArray.from_pickle(train_methyl_array)
ma_v=MethylationArray.from_pickle(val_methyl_array)
try:
ma.remove_na_samples(interest_col)
ma_v.remove_na_samples(interest_col)
except:
pass
if select_subtypes:
ma.pheno=ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]
ma.beta=ma.beta.loc[ma.pheno.index]
ma_v.pheno=ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]
ma_v.beta=ma_v.beta.loc[ma_v.pheno.index]
capsules_dict=torch.load(capsules_pickle)
final_modules, modulecpgs, module_names = capsules_dict['final_modules'],capsules_dict['modulecpgs'],capsules_dict['module_names']
if not include_last:
ma.beta=ma.beta.loc[:,modulecpgs]
ma_v.beta=ma_v.beta.loc[:,modulecpgs]
original_interest_col=interest_col
if n_bins:
new_interest_col=interest_col+'_binned'
        ma.pheno.loc[:,new_interest_col],bins=pd.cut(ma.pheno[interest_col],bins=n_bins,retbins=True)
# Created by <NAME> at 2021/5/12
import pathlib
import numpy as np
import pandas as pd
import statsmodels.api as sm
from fracdiff import FracdiffStat
from scipy.stats import entropy
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.preprocessing import PowerTransformer, MinMaxScaler, OneHotEncoder
from sklearn.utils.validation import _deprecate_positional_args
f = FracdiffStat()
p = PCA(n_components=1)
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
def auto_label_encode(df, num=None, col=None, by=None):
"""
    Bin-encode a series: discretize the values into bins and assign integer-like labels.
:param df:
:param col:
:param num:
:param by:
:return:
"""
if not col:
col = df.columns[0]
if not num:
num = 10
if by == 'entropy':
min_entropy = -float('inf')
best_num = None
for i in range(2, num):
cur_std = entropy(pd.cut(df[col], bins=i, ).value_counts().values)
print(cur_std)
if cur_std > min_entropy:
best_num = i
min_entropy = cur_std
print(best_num)
else:
best_num = num
df1 = pd.cut(df[col], bins=best_num, labels=[str(i) for i in range(best_num)])
df1.name = col
return df1.to_frame()
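# Illustrative helper on synthetic data (not from the project's data files): let the
# entropy criterion pick the bin count for a random, return-like series.
def _demo_auto_label_encode(n: int = 500) -> pd.DataFrame:
    rng = np.random.default_rng(0)
    demo_df = pd.DataFrame({"logret": rng.normal(size=n)})
    # by='entropy' searches 2..num-1 bins and keeps the split with the most even counts
    return auto_label_encode(demo_df, num=8, col="logret", by="entropy")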
_auto_filter = ["rolling", "expanding", ...]
def auto_filter(df):
"""
    Automatically apply smoothing filters to feature series, possibly as new features (TODO).
:param df:
:return:
"""
return
def load_data():
file = pathlib.Path("data", "课题一数据.xls")
df = pd.read_excel(file, sheet_name="螺纹钢相关")
del df[df.columns[1]]
df["日期"] = pd.to_datetime(df["日期"])
df.set_index(df["日期"], inplace=True)
df['logret'] = 100 * df['RB:活跃合约'].apply(np.log).diff()
df2 = pd.read_excel(file, sheet_name="宏观数据", index_col=0)
df2.index = pd.to_datetime(df2.index)
    # Completed fixed-asset investment by sector, YoY (monthly)
df3 = pd.read_csv(pathlib.Path("data", "investment.csv"),
index_col=0)
df3.index = pd.DatetimeIndex(pd.to_datetime(df3.index))
# add more features ...
return df, df2, df3
def load_rb(start=None, end=None):
"""
    Load the continuous contract prices.
:param start:
:param end:
:return:
"""
file = pathlib.Path("data", "rb_continuous_contracts.csv")
df = pd.read_csv(file, index_col=0)
df.index = pd.DatetimeIndex(df.index)
if start == None and end == None:
df = df
else:
df = df.loc[start:end]
df = _cont_contract_price_adj(df)
return df
def _cont_contract_price_adj(df, method='ratio'):
"""
    Remove the price gaps introduced by contract rollover (back-adjust the series).
:param df:
:param method:
:return:
"""
df = df.copy()
df['close_adj'] = df['close']
df['settle_adj'] = df['settle']
prv_contract = None
for idx, row in df.reset_index().iterrows():
cur_contract = row.trade_hiscode
if prv_contract is None:
prv_contract = cur_contract
continue
if method == "ratio":
if cur_contract != prv_contract:
cur_close = row.close
cur_settle = row.settle
prv_close = df.iloc[idx - 1].close
prv_settle = df.iloc[idx - 1].settle
close_ratio = cur_close / prv_close
settle_ratio = cur_settle / prv_settle
df.iloc[:idx, -2] *= close_ratio
df.iloc[:idx, -1] *= settle_ratio
elif method == 'gap':
if cur_contract != prv_contract:
cur_close = row.close
cur_settle = row.settle
prv_close = df.iloc[idx - 1].close
prv_settle = df.iloc[idx - 1].settle
close_gap = cur_close - prv_close
settle_gap = cur_settle - prv_settle
df.iloc[:idx, -2] += close_gap
df.iloc[:idx, -1] += settle_gap
prv_contract = cur_contract
return df
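# Brief workflow sketch (the date range is a placeholder): load the rebar continuous
# contract over a window and inspect raw vs. back-adjusted closes around roll dates.
# Assumes data/rb_continuous_contracts.csv exists, as load_rb already expects.
def _demo_rollover_adjustment() -> pd.DataFrame:
    rb = load_rb(start="2019-01-01", end="2020-12-31")
    # close_adj has rollover jumps removed multiplicatively (method='ratio' by default)
    return rb[["trade_hiscode", "close", "close_adj"]].tail(10)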
def process_feature(file, start, end):
"""
    Load a feature file and apply simple cleaning and preprocessing.
:param file:
:param start:
:param end:
:return:
"""
if 'CPI' in file:
return process_cpi(file, start, end)
elif 'inventory' in file:
return process_inventory(file, start, end)
elif 'investment' in file:
return process_sector_investment(file, start, end)
elif 'money' in file:
return process_money_supply(file, start, end)
elif 'PMI' in file:
return process_pmi(file, start, end)
elif 'PPI' in file:
return process_ppi(file, start, end)
elif 'SHIBOR' in file:
return process_shibor(file, start, end)
elif 'ta.csv' in file:
return process_ta(file, start, end)
elif "社会融资规模" in file:
return process_private_financing(file, start, end)
elif "freight_cost.csv" in file:
return process_freight_cost(file, start, end)
elif "google_trend_" in file:
return process_google_trend(file, start, end)
elif "foreign_price.csv" in file:
return process_foreign_price(file, start, end)
elif "spot_spread.csv" in file:
return process_spot_spread(file, start, end)
elif "mysteel_price_index.csv" in file:
return process_price_index(file, start, end)
elif "weather.csv" in file:
return process_weather(file, start, end)
elif "purchase_amount.csv" in file:
return process_purchase_amt(file, start, end)
elif "房地产开发、销售(月).csv" in file:
return process_real_estate(file, start, end)
def process_cpi(file, start, end):
'''
    Clean the CPI features:
    1. Account for the data-release delay
    2. Make the series stationary with the fractional-difference algorithm
:param file:
:param start:
:param end:
:return:
'''
df = pd.read_csv(file, index_col=0)
df.index = pd.DatetimeIndex(df.index)
df.dropna(inplace=True)
    df = df.shift(1)  # data-release delay
df.fillna(method='bfill', inplace=True)
df = df.loc[ : end]
cpi_LP = ['CPI:环比', 'CPI:不包括食品和能源(核心CPI):环比']
cpi_POP = df.columns.difference(cpi_LP)
df_new = _process_cpi(df, )
return df_new.loc[start: end]
def _process_cpi(df, ):
cpi_LP = ['CPI:环比', 'CPI:不包括食品和能源(核心CPI):环比']
cpi_POP = df.columns.difference(cpi_LP)
df_new = pd.DataFrame()
df_new['cpi_LP'] = f.fit_transform(p.fit_transform(df[cpi_LP])).flatten()
df_new['cpi_POP'] = f.fit_transform(p.fit_transform(df[['CPI:累计同比', 'CPI:不包括食品和能源(核心CPI):当月同比']])).flatten()
df_new.index = df.index
return df_new
def process_inventory(file, start, end):
"""
    Process the social (off-mill) rebar inventory indicator:
    1. Remove seasonality by keeping the residual of an STL decomposition
:param file:
:param start:
:param end:
:return:
"""
df = pd.read_csv(file, index_col=0)
df.index = pd.DatetimeIndex(df.index)
df.dropna(inplace=True)
df = df.loc[: end]
result = _process_inventory(df)
return result.loc[start: end]
def _process_inventory(df):
inventory_df = 100 * df.pct_change()
inventory_df.fillna(method='bfill', inplace=True)
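    # keep only the STL residual: period=52 strips the annual cycle (weekly data) and trend, leaving surprise inventory moves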
inventory_resid = sm.tsa.STL(inventory_df['螺纹钢库存'], period=52).fit().resid
inventory_resid.name = 'spot_inventory_level_resid'
item = inventory_resid.to_frame()
return item
def process_sector_investment(file, start, end):
"""
    Overlaying the dominant-contract price with the cumulative YoY investment in
    ferrous-metal smelting (in a charting tool) shows that the latter leads the former to some extent.
:param file:
:param start:
:param end:
:return:
"""
df = pd.read_csv(file, index_col=0).shift(1).dropna()
df.index = pd.DatetimeIndex(df.index)
df = df.loc[: end]
investment_new = _process_sector_investment(df)
return investment_new.loc[start: end]
def _process_sector_investment(df):
investment_new = pd.DataFrame()
try:
investment_new['indus_invest'] = f.fit_transform(df.values).flatten()
except:
investment_new['indus_invest'] = (1+df.pct_change()).cumprod().values.flatten()
investment_new.index = df.index
return investment_new
def process_money_supply(file, start, end):
"""
    Process the money-supply features (M1 & M2):
    1. Make the series stationary with the fractional-difference algorithm
    2. Account for the publication delay
:param file:
:param start:
:param end:
:return:
"""
df = pd.read_csv(file, index_col=0)
df = df.dropna()
df = df.shift(1)
df = df.dropna()
df.index = pd.DatetimeIndex(pd.to_datetime(df.index))
df = df.loc[:end]
money_new = pd.DataFrame()
try:
for col in df.columns:
money_new[col] = f.fit_transform(df[col].values.reshape(-1, 1)).flatten()
except:
money_new['money_supply'] = p.fit_transform(df[df.columns]).flatten()
money_new.index = df.index
return money_new.loc[start:end]
def process_pmi(file, start, end):
"""
    Process the PMI-related features:
    1. Reduce dimensionality with PCA
    2. Account for the publication delay
:param file:
:param start:
:param end:
:return:
"""
df = pd.read_csv(file, index_col=0)
df = df.shift(1)
df.index = pd.to_datetime(df.index)
df = df.loc[:end]
pmi_new = pd.DataFrame()
pmi_new['pmi'] = p.fit_transform(imp.fit_transform(df)).flatten()
pmi_new.index = df.index
return pmi_new.loc[start:end]
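# NOTE (assumption): `imp` used above is not defined in this snippet; it is assumed to be a
# pre-configured imputer defined alongside `p` and `f`, e.g. (illustrative only):
#
#   from sklearn.impute import SimpleImputer
#   imp = SimpleImputer(strategy='mean')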
def process_ppi(file, start, end):
"""
Stationarize the PPI series
:param file:
:param start:
:param end:
:return:
"""
df = pd.read_csv(file, index_col=0)
df = df.dropna()
df = df.shift(1)
df = df.dropna()
df.index = pd.DatetimeIndex( | pd.to_datetime(df.index) | pandas.to_datetime |
import pandas as pd
import datetime
from typing import List, Dict
# Define hourly cost per line - regular, overtime and weekend
reg_costs_per_line = {"Line_1": 245, "Line_2": 315, "Line_3": 245}
lines: List[str] = list(reg_costs_per_line.keys())
# Get orders
customer_orders = pd.read_excel("Customer_orders.xlsx")
# Get cycle times
capacity = pd.read_excel("Constraints.xlsx", sheet_name="8h capacity").set_index("Line")
cycle_time = capacity.rdiv(8)
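# cycle_time = 8 / capacity: hours needed per unit, assuming the "8h capacity" sheet
# lists the number of units each line can produce in an 8-hour shift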
def check_duplicates(list_to_check):
if len(list_to_check) == len(set(list_to_check)):
return
else:
print("Duplicate order, please check the requirements file")
exit()
return
order_list = customer_orders["Order"].to_list()
check_duplicates(order_list)
# Attach cycle times to the customer orders (merge on product family)
customer_orders = customer_orders.merge(
cycle_time, left_on="Product_Family", right_index=True
)
customer_orders["Delivery_Date"] = pd.to_datetime(
customer_orders["Delivery_Date"]
).dt.strftime("%Y/%m/%d")
customer_orders = customer_orders.sort_values(by=["Delivery_Date", "Order"])
# Define calendar
start_date = datetime.datetime.strptime(
customer_orders["Delivery_Date"].min(), "%Y/%m/%d"
)
end_date = datetime.datetime.strptime(
customer_orders["Delivery_Date"].max(), "%Y/%m/%d"
)
date_modified = start_date
calendar = [start_date.strftime("%Y/%m/%d")]
while date_modified < end_date:
date_modified += datetime.timedelta(days=1)
calendar.append(date_modified.strftime("%Y/%m/%d"))
# Get changeover
data = | pd.read_csv("Planning_model4_list.csv") | pandas.read_csv |
import math
from datetime import datetime, timedelta
import pandas as pd
import requests
from pandas.io.json import json_normalize
def build_query_url(
begin_date, end_date, stationid, product, datum=None, bin_num=None,
interval=None, units='metric', time_zone='gmt'):
"""
Build a URL to be used to fetch data from the NOAA CO-OPS API
(see https://tidesandcurrents.noaa.gov/api/)
"""
base_url = 'http://tidesandcurrents.noaa.gov/api/datagetter?'
# If the data product is water levels, check that a datum is specified
if product == 'water_level':
if datum is None:
raise ValueError('No datum specified for water level data. See'
' https://tidesandcurrents.noaa.gov/api/#datum '
'for list of available datums')
else:
# Compile parameter string for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'datum': datum,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
elif product == 'hourly_height':
if datum is None:
raise ValueError('No datum specified for water level data. See'
' https://tidesandcurrents.noaa.gov/api/#datum '
'for list of available datums')
else:
# Compile parameter string for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'datum': datum,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
elif product == 'high_low':
if datum is None:
raise ValueError('No datum specified for water level data. See'
' https://tidesandcurrents.noaa.gov/api/#datum '
'for list of available datums')
else:
# Compile parameter string for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'datum': datum,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
elif product == 'predictions':
# If no interval provided, return 6-min predictions data
if interval is None:
# Compile parameter string for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'datum': datum,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
else:
# Compile parameter string, including interval, for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'datum': datum,
'interval': interval,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
# If the data product is currents, check that a bin number is specified
elif product == 'currents':
if bin_num is None:
raise ValueError(
'No bin specified for current data. Bin info can be '
'found on the station info page'
' (e.g., https://tidesandcurrents.noaa.gov/cdata/StationInfo?id=PUG1515)')
else:
# Compile parameter string for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'bin': str(bin_num),
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
# For all other data types (e.g., meteorological conditions)
else:
# If no interval provided, return 6-min met data
if interval is None:
# Compile parameter string for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
else:
# Compile parameter string, including interval, for use in URL
parameters = {'begin_date': begin_date,
'end_date': end_date,
'station': stationid,
'product': product,
'interval': interval,
'units': units,
'time_zone': time_zone,
'application': 'py_noaa',
'format': 'json'}
# Build URL with requests library
query_url = requests.Request(
'GET', base_url, params=parameters).prepare().url
return query_url
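# Example (illustrative values only): build a 6-minute water-level query
#
#   url = build_query_url('20180101', '20180131', '9447130', 'water_level',
#                         datum='MLLW', units='metric', time_zone='gmt')
#
# Station 9447130 and the MLLW datum are placeholders; any station id and datum
# accepted by the CO-OPS API docs can be used instead.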
def url2pandas(data_url, product, num_request_blocks):
"""
Takes in a provided URL using the NOAA CO-OPS API conventions
(see https://tidesandcurrents.noaa.gov/api/) and converts the corresponding
JSON data into a pandas dataframe.
"""
response = requests.get(data_url) # Get JSON data from URL
json_dict = response.json() # Create a dictionary from JSON data
df = pd.DataFrame()  # Initialize an empty DataFrame
# Error when the requested begin_date and/or end_date does not have data
large_data_gap_error = 'No data was found. This product may not be offered at this station at the requested time.'
# Handle coops.get_data() request size & errors from COOPS API, cases below:
# 1. coops.get_data() makes a large request (i.e. >1 block requests)
# and an error occurs in one of the individual blocks of data
# 2. coops.get_data() makes a large request (i.e. >1 block requests)
# and an error does not occur in one of the individual blocks of data
# 3. coops.get_data() makes a small request (i.e. 1 request)
# and an error occurs in the data requested
# 4. coops.get_data() makes a small request (i.e. 1 request)
# and an error does not occur in the data requested
# Case 1
if (num_request_blocks > 1) and ('error' in json_dict):
error_message = json_dict['error'].get('message',
'Error retrieving data')
error_message = error_message.lstrip()
error_message = error_message.rstrip()
if error_message == large_data_gap_error:
return df # Return the empty DataFrame
else:
raise ValueError(
json_dict['error'].get('message', 'Error retrieving data'))
# Case 2
elif (num_request_blocks > 1) and ('error' not in json_dict):
if product == 'predictions':
key = 'predictions'
else:
key = 'data'
df = json_normalize(json_dict[key]) # Parse JSON dict into dataframe
return df
# Case 3
elif (num_request_blocks == 1) and ('error' in json_dict):
raise ValueError(
json_dict['error'].get('message', 'Error retrieving data'))
# Case 4
else:
if product == 'predictions':
key = 'predictions'
else:
key = 'data'
df = json_normalize(json_dict[key]) # Parse JSON dict into dataframe
return df
def parse_known_date_formats(dt_string):
"""Attempt to parse CO-OPS accepted date formats."""
for fmt in ('%Y%m%d', '%Y%m%d %H:%M', '%m/%d/%Y', '%m/%d/%Y %H:%M'):
try:
return datetime.strptime(dt_string, fmt)
except ValueError:
pass
raise ValueError("No valid date format found."
"See https://tidesandcurrents.noaa.gov/api/ "
"for list of accepted date formats.")
def get_data(
begin_date, end_date, stationid, product, datum=None, bin_num=None,
interval=None, units='metric', time_zone='gmt'):
"""
Function to get data from NOAA CO-OPS API and convert it to a pandas
dataframe for convenient analysis.
Info on the NOAA CO-OPS API can be found at https://tidesandcurrents.noaa.gov/api/,
the arguments listed below generally follow the same (or a very similar) format.
Arguments:
begin_date -- the starting date of request (yyyyMMdd, yyyyMMdd HH:mm, MM/dd/yyyy, or MM/dd/yyyy HH:mm), string
end_date -- the ending date of request (yyyyMMdd, yyyyMMdd HH:mm, MM/dd/yyyy, or MM/dd/yyyy HH:mm), string
stationid -- station at which you want data, string
product -- the product type you would like, string
datum -- the datum to be used for water level data, string (default None)
bin_num -- the bin number you would like your currents data at, int (default None)
interval -- the interval you would like data returned, string
units -- units to be used for data output, string (default metric)
time_zone -- time zone to be used for data output, string (default gmt)
"""
# Convert dates to datetime objects so deltas can be calculated
begin_datetime = parse_known_date_formats(begin_date)
end_datetime = parse_known_date_formats(end_date)
delta = end_datetime - begin_datetime
# If the length of our data request is less or equal to 31 days,
# we can pull the data from API in one request
if delta.days <= 31:
data_url = build_query_url(
begin_datetime.strftime("%Y%m%d %H:%M"),
end_datetime.strftime("%Y%m%d %H:%M"),
stationid, product, datum, bin_num, interval, units, time_zone)
df = url2pandas(data_url, product, num_request_blocks=1)
# If the length of the user specified data request is less than 365 days
# AND the product is hourly_height or high_low, we can pull data directly
# from the API in one request
elif delta.days <= 365 and (
product == 'hourly_height' or product == 'high_low'):
data_url = build_query_url(
begin_date, end_date, stationid, product, datum, bin_num, interval,
units, time_zone)
df = url2pandas(data_url, product, num_request_blocks=1)
# If the length of the user specified data request is greater than 365 days
# AND the product is hourly_height or high_low, we need to load data from
# the API in 365 day blocks.
elif product == 'hourly_height' or product == 'high_low':
# Find the number of 365 day blocks in our desired period,
# constrain the upper limit of index in the for loop to follow
num_365day_blocks = int(math.floor(delta.days / 365))
df = pd.DataFrame([]) # Empty dataframe for data from API requests
# Loop through in 365 day blocks,
# adjust the begin_datetime and end_datetime accordingly,
# make a request to the NOAA CO-OPS API
for i in range(num_365day_blocks + 1):
begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))
end_datetime_loop = begin_datetime_loop + timedelta(days=365)
# If end_datetime_loop of the current 365 day block is greater
# than end_datetime specified by user, use end_datetime
if end_datetime_loop > end_datetime:
end_datetime_loop = end_datetime
# Build url for each API request as we proceed through the loop
data_url = build_query_url(
begin_datetime_loop.strftime('%Y%m%d'),
end_datetime_loop.strftime('%Y%m%d'),
stationid, product, datum, bin_num, interval, units, time_zone)
df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block
df = df.append(df_new) # Append to existing dataframe
# If the length of the user specified data request is greater than 31 days
# for any other products, we need to load data from the API in 31 day
# blocks
else:
# Find the number of 31 day blocks in our desired period,
# constrain the upper limit of index in the for loop to follow
num_31day_blocks = int(math.floor(delta.days / 31))
df = pd.DataFrame([]) # Empty dataframe for data from API requests
# Loop through in 31 day blocks,
# adjust the begin_datetime and end_datetime accordingly,
# make a request to the NOAA CO-OPS API
for i in range(num_31day_blocks + 1):
begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))
end_datetime_loop = begin_datetime_loop + timedelta(days=31)
# If end_datetime_loop of the current 31 day block is greater
# than end_datetime specified by user, use end_datetime
if end_datetime_loop > end_datetime:
end_datetime_loop = end_datetime
# Build URL for each API request as we proceed through the loop
data_url = build_query_url(
begin_datetime_loop.strftime('%Y%m%d'),
end_datetime_loop.strftime('%Y%m%d'),
stationid, product, datum, bin_num, interval, units, time_zone)
df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block
df = df.append(df_new) # Append to existing dataframe
# Rename output dataframe columns based on requested product
# and convert to useable data types
if product == 'water_level':
# Rename columns for clarity
df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',
't': 'date_time', 'v': 'water_level'},
inplace=True)
# Convert columns to numeric values
data_cols = df.columns.drop(['flags', 'QC', 'date_time'])
df[data_cols] = df[data_cols].apply(
pd.to_numeric, axis=1, errors='coerce')
# Convert date & time strings to datetime objects
df['date_time'] = pd.to_datetime(df['date_time'])
elif product == 'hourly_height':
# Rename columns for clarity
df.rename(columns={'f': 'flags', 's': 'sigma',
't': 'date_time', 'v': 'water_level'},
inplace=True)
# Convert columns to numeric values
data_cols = df.columns.drop(['flags', 'date_time'])
df[data_cols] = df[data_cols].apply(
pd.to_numeric, axis=1, errors='coerce')
# Convert date & time strings to datetime objects
df['date_time'] = pd.to_datetime(df['date_time'])
elif product == 'high_low':
# Rename columns for clarity
df.rename(columns={'f': 'flags', 'ty': 'high_low',
't': 'date_time', 'v': 'water_level'},
inplace=True)
# Separate to high and low dataframes
df_HH = df[df['high_low'] == "HH"].copy()
df_HH.rename(columns={'date_time': 'date_time_HH',
'water_level': 'HH_water_level'},
inplace=True)
df_H = df[df['high_low'] == "H "].copy()
df_H.rename(columns={'date_time': 'date_time_H',
'water_level': 'H_water_level'},
inplace=True)
df_L = df[df['high_low'].str.contains("L ")].copy()
df_L.rename(columns={'date_time': 'date_time_L',
'water_level': 'L_water_level'},
inplace=True)
df_LL = df[df['high_low'].str.contains("LL")].copy()
df_LL.rename(columns={'date_time': 'date_time_LL',
'water_level': 'LL_water_level'},
inplace=True)
# Extract dates (without time) for each entry
dates_HH = [x.date() for x in | pd.to_datetime(df_HH['date_time_HH']) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 16:31:58 2021
@author: snoone
"""
import os
import glob
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
OUTDIR = "D:/Python_CDM_conversion/hourly/qff/cdm_out/observations_table"
os.chdir("D:/Python_CDM_conversion/hourly/qff/test")
extension = 'qff'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
## use a list of file names to run 5000 in parallel
#with open("D:/Python_CDM_conversion/hourly/qff/ls.txt", "r") as f:
# all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
## to start at the beginning of the files
for filename in all_filenames:
## to start at the next file after the last one processed
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
df=pd.read_csv(filename, sep="|")
## set up master df to extract each variable
df["report_id"]=""
df["observation_id"]=""
df["data_policy_licence"]=""
df["date_time_meaning"]="1"
df["observation_duration"]="0"
df["latitude"]=df["Latitude"]
df["longitude"]=df["Longitude"]
df["crs"]=""
df["z_coordinate"]=""
df["z_coordinate_type"]=""
df["observation_height_above_station_surface"]=""
df["observed_variable"]=""
df["secondary_variable"]=""
df["observation_value"]=""
df["value_significance"]="12"
df["secondary_value"]=""
df["units"]=""
df["code_table"]=""
df["conversion_flag"]=""
df["location_method"]=""
df["location_precision"]=""
df["z_coordinate_method"]=""
df["bbox_min_longitude"]=""
df["bbox_max_longitude"]=""
df["bbox_min_latitude"]=""
df["bbox_max_latitude"]=""
df["spatial_representativeness"]=""
df["original_code_table"]=""
df["quality_flag"]=""
df["numerical_precision"]=""
df["sensor_id"]=""
df["sensor_automation_status"]=""
df["exposure_of_sensor"]=""
df["original_precision"]=""
df["original_units"]=""
df["original_code_table"]=""
df["original_value"]=""
df["conversion_method"]=""
df["processing_code"]=""
df["processing_level"]="0"
df["adjustment_id"]=""
df["traceability"]=""
df["advanced_qc"]=""
df["advanced_uncertainty"]=""
df["advanced_homogenisation"]=""
df["advanced_assimilation_feedback"]=""
df["source_id"]=""
df["source_record_id"]=""
df["primary_station_id"]=df["Station_ID"]
df["Timestamp2"] = df["Year"].map(str) + "-" + df["Month"].map(str)+ "-" + df["Day"].map(str)
df["Seconds"]="00"
df["offset"]="+00"
df["date_time"] = df["Timestamp2"].map(str)+ " " + df["Hour"].map(str)+":"+df["Minute"].map(str)+":"+df["Seconds"].map(str)
df['date_time'] = pd.to_datetime(df['date_time'], format='%Y-%m-%d %H:%M:%S')  # matches the "Y-M-D H:M:S" string built above
df['date_time'] = df['date_time'].astype('str')
df.date_time = df.date_time + '+00'
#=========================================================================================
##convert temperature changes for each variable
dft = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dft["observation_value"]=df["temperature"]+273.15
dft["source_id"]=df["temperature_Source_Code"]
dft["Seconds"]="00"
dft["quality_flag"]=df["temperature_QC_flag"]
dft["qc_method"]=dft["quality_flag"]
dft["conversion_flag"]="0"
dft["conversion_method"]="1"
dft["numerical_precision"]="0.01"
dft["original_precision"]="0.1"
dft["original_units"]="60"
dft["original_value"]=df["temperature"]
dft["observation_height_above_station_surface"]="2"
dft["units"]="5"
dft["observed_variable"]="85"
## set quality flag from the master df for this variable: non-null flags become 1, missing flags become 0
dft.loc[dft['quality_flag'].notnull(), "quality_flag"] = 1
dft = dft.fillna("Null")
dft.quality_flag[dft.quality_flag == "Null"] = 0
#change for each variable if required
## remove unwanted missing data rows
dft = dft.fillna("null")
dft = dft.replace({"null":"-99999"})
dft = dft[dft.observation_value != -99999]
#df = df.astype(str)
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['primary_station_id_2']=dft['primary_station_id'].astype(str)+'-'+dft['source_id'].astype(str)
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
#dft.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dft = dft.astype(str)
df2 = df2.astype(str)
dft= df2.merge(dft, on=['primary_station_id_2'])
dft['data_policy_licence'] = dft['data_policy_licence_x']
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft['observation_id']=dft['primary_station_id'].astype(str)+'-'+dft['record_number'].astype(str)+'-'+dft['date_time'].astype(str)
dft['observation_id'] = dft['observation_id'].str.replace(r' ', '-')
## remove unwanted last two characters
dft['observation_id'] = dft['observation_id'].str[:-6]
dft["observation_id"]=dft["observation_id"]+'-'+dft['observed_variable'].astype(str)+'-'+dft['value_significance'].astype(str)
dft["report_id"]=dft["observation_id"].str[:-6]
##set up qc table
dft = dft[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
dft.dropna(subset=["observation_value"], inplace=True)
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
dft["observation_value"]= dft["observation_value"].round(2)
#dft.to_csv("isuest.csv", index=False, sep=",")
#=================================================================================
##convert dew point temperature changes for each variable
dfdpt= df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfdpt["observation_value"]=df["dew_point_temperature"]+273.15
dfdpt["source_id"]=df["dew_point_temperature_Source_Code"]
dfdpt["Seconds"]="00"
dfdpt["quality_flag"]=df["dew_point_temperature_QC_flag"]
dfdpt["conversion_flag"]="0"
dfdpt["conversion_method"]="1"
dfdpt["numerical_precision"]="0.01"
dfdpt["original_precision"]="0.1"
dfdpt["original_units"]="60"
dfdpt["original_value"]=df["dew_point_temperature"]
dfdpt["observation_height_above_station_surface"]="2"
dfdpt["units"]="5"
dfdpt["observed_variable"]="36"
## set quality flag from the master df for this variable: non-null flags become 1, missing flags become 0
dfdpt.loc[dfdpt['quality_flag'].notnull(), "quality_flag"] = 1
dfdpt= dfdpt.fillna("Null")
dfdpt.quality_flag[dfdpt.quality_flag == "Null"] = 0
## remove unwanted missing data rows
dfdpt= dfdpt.fillna("null")
dfdpt= dfdpt.replace({"null":"-99999"})
dfdpt= dfdpt[dfdpt.observation_value != -99999]
#df = df.astype(str)
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['primary_station_id_2']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['source_id'].astype(str)
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
#dfdpt.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfdpt= dfdpt.astype(str)
df2 = df2.astype(str)
dfdpt= df2.merge(dfdpt, on=['primary_station_id_2'])
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence_x']
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['observation_id']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['record_number'].astype(str)+'-'+dfdpt['date_time'].astype(str)
dfdpt['observation_id'] = dfdpt['observation_id'].str.replace(r' ', '-')
## remove unwanted last two characters
dfdpt['observation_id'] = dfdpt['observation_id'].str[:-6]
dfdpt["observation_id"]=dfdpt["observation_id"]+'-'+dfdpt['observed_variable'].astype(str)+'-'+dfdpt['value_significance'].astype(str)
dfdpt["report_id"]=dfdpt["observation_id"].str[:-6]
##set up qc table
dfdpt= dfdpt[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
dfdpt.dropna(subset = ["observation_value"], inplace=True)
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
dfdpt["observation_value"]= dfdpt["observation_value"].round(2)
#====================================================================================
#convert station level to cdmlite
dfslp = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfslp["observation_value"]=df["station_level_pressure"]
dfslp["source_id"]=df["station_level_pressure_Source_Code"]
dfslp["Seconds"]="00"
dfslp["quality_flag"]=df["station_level_pressure_QC_flag"]
dfslp["conversion_flag"]="0"
dfslp["conversion_method"]="7"
dfslp["numerical_precision"]="10"
dfslp["original_precision"]="0.1"
dfslp["original_units"]="530"
dfslp["original_value"]=df["station_level_pressure"]
dfslp["observation_height_above_station_surface"]="2"
dfslp["units"]="32"
dfslp["observed_variable"]="57"
## set quality flag from the master df for this variable: non-null flags become 1, missing flags become 0
dfslp.loc[dfslp['quality_flag'].notnull(), "quality_flag"] = 1
dfslp = dfslp.fillna("Null")
dfslp.quality_flag[dfslp.quality_flag == "Null"] = 0
#change for each variable if required
## remove unwanted missing data rows
dfslp = dfslp.fillna("null")
dfslp = dfslp.replace({"null":"-99999"})
dfslp = dfslp[dfslp.observation_value != -99999]
#df = df.astype(str)
dfslp["source_id"] = pd.to_numeric(dfslp["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfslp['source_id'] = dfslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['primary_station_id_2']=dfslp['primary_station_id'].astype(str)+'-'+dfslp['source_id'].astype(str)
dfslp["observation_value"] = pd.to_numeric(dfslp["observation_value"],errors='coerce')
#dfslp.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfslp = dfslp.astype(str)
df2 = df2.astype(str)
dfslp= df2.merge(dfslp, on=['primary_station_id_2'])
dfslp['data_policy_licence'] = dfslp['data_policy_licence_x']
dfslp['data_policy_licence'] = dfslp['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['observation_id']=dfslp['primary_station_id'].astype(str)+'-'+dfslp['record_number'].astype(str)+'-'+dfslp['date_time'].astype(str)
dfslp['observation_id'] = dfslp['observation_id'].str.replace(r' ', '-')
## remove unwanted last two characters
dfslp['observation_id'] = dfslp['observation_id'].str[:-6]
dfslp["observation_id"]=dfslp["observation_id"]+'-'+dfslp['observed_variable'].astype(str)+'-'+dfslp['value_significance'].astype(str)
dfslp["report_id"]=dfslp["observation_id"].str[:-6]
##set up qc table
dfslp = dfslp[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
## make sure there are no decimal places and round the value to the required precision
dfslp['observation_value'] = dfslp['observation_value'].map(float)
dfslp['observation_value'] = (dfslp['observation_value']*100)
dfslp['observation_value'] = dfslp['observation_value'].map(int)
dfslp['source_id'] = dfslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp['data_policy_licence'] = dfslp['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfslp["source_id"] = pd.to_numeric(dfslp["source_id"],errors='coerce')
dfslp['observation_value'] = dfslp['observation_value'].astype(str).apply(lambda x: x.replace('.0',''))
#dfslp.to_csv("slp.csv", index=False, sep=",")
#===========================================================================================
#convert sea level presure to cdmlite
dfmslp = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfmslp["observation_value"]=df["sea_level_pressure"]
dfmslp["source_id"]=df["sea_level_pressure_Source_Code"]
dfmslp["Seconds"]="00"
dfmslp["quality_flag"]=df["sea_level_pressure_QC_flag"]
dfmslp["conversion_flag"]="0"
dfmslp["conversion_method"]="7"
dfmslp["numerical_precision"]="10"
dfmslp["original_precision"]="0.1"
dfmslp["original_units"]="530"
dfmslp["original_value"]=df["temperature"]
dfmslp["observation_height_above_station_surface"]="2"
dfmslp["units"]="32"
dfmslp["observed_variable"]="58"
## set quality flag from the master df for this variable: non-null flags become 1, missing flags become 0
dfmslp.loc[dfmslp['quality_flag'].notnull(), "quality_flag"] = 1
dfmslp = dfmslp.fillna("null")
dfmslp.quality_flag[dfmslp.quality_flag == "Null"] = 0
#change for each variable if required
## remove unwanted missing data rows
dfmslp = dfmslp.fillna("null")
dfmslp = dfmslp.replace({"null":"-99999"})
dfmslp = dfmslp[dfmslp.observation_value != -99999]
#df = df.astype(str)
dfmslp["source_id"] = pd.to_numeric(dfmslp["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfmslp['source_id'] = dfmslp['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfmslp['primary_station_id_2']=dfmslp['primary_station_id'].astype(str)+'-'+dfmslp['source_id'].astype(str)
dfmslp["observation_value"] = pd.to_numeric(dfmslp["observation_value"],errors='coerce')
#dfmslp.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2= | pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv") | pandas.read_csv |
# Lint as: python3
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for client."""
import os
from typing import List
import csv
import tempfile
import unittest
import mock
import pandas as pd
from tfrecorder import client
from tfrecorder import constants
from tfrecorder import test_utils
class ClientTest(unittest.TestCase):
"""Misc tests for `client` module."""
def setUp(self):
self.test_df = test_utils.get_test_df()
self.test_region = 'us-central1'
self.test_project = 'foo'
@mock.patch('tfrecorder.client.beam_pipeline')
def test_create_tfrecords_direct_runner(self, mock_beam):
"""Tests `create_tfrecords` Direct case."""
mock_beam.build_pipeline().run().wait_until_finished.return_value = {
'rows':6}
r = client.create_tfrecords(
self.test_df,
runner='DirectRunner',
output_dir='/tmp/direct_runner')
self.assertTrue('metrics' in r)
@mock.patch('tfrecorder.client.beam_pipeline')
def test_create_tfrecords_dataflow_runner(self, mock_beam):
"""Tests `create_tfrecords` Dataflow case."""
mock_beam.build_pipeline().run().job_id.return_value = 'foo_id'
df2 = self.test_df.copy()
df2[constants.IMAGE_URI_KEY] = 'gs://' + df2[constants.IMAGE_URI_KEY]
outdir = '/tmp/dataflow_runner'
expected = {
'job_id': 'foo_id',
'dataflow_url': 'https://console.cloud.google.com/dataflow/jobs/' +
'us-central1/foo_id?project=foo'}
os.makedirs(outdir, exist_ok=True)
r = client.create_tfrecords(
df2,
runner='DataflowRunner',
output_dir=outdir,
region=self.test_region,
project=self.test_project)
self.assertEqual(r, expected)
# pylint: disable=protected-access
class InputValidationTest(unittest.TestCase):
"""'Tests for validation input data."""
def setUp(self):
self.test_df = test_utils.get_test_df()
self.test_region = 'us-central1'
self.test_project = 'foo'
def test_valid_dataframe(self):
"""Tests valid DataFrame input."""
self.assertIsNone(
client._validate_data(
self.test_df))
def test_missing_image(self):
"""Tests missing image column."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
df2.drop('image_uri', inplace=True, axis=1)
client._validate_data(df2)
def test_missing_label(self):
"""Tests missing label column."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
df2.drop('label', inplace=True, axis=1)
client._validate_data(df2)
def test_missing_split(self):
"""Tests missing split column."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
df2.drop('split', inplace=True, axis=1)
client._validate_data(df2)
def test_columns_out_of_order(self):
"""Tests validating wrong column order."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
cols = ['image_uri', 'split', 'label']
df2 = df2[cols]
client._validate_data(df2)
def test_valid_runner(self):
"""Tests valid runner."""
self.assertIsNone(client._validate_runner(
self.test_df,
runner='DirectRunner',
project=self.test_project,
region=self.test_region))
def test_invalid_runner(self):
"""Tests invalid runner."""
with self.assertRaises(AttributeError):
client._validate_runner(
self.test_df,
runner='FooRunner',
project=self.test_project,
region=self.test_region)
def test_local_path_with_dataflow_runner(self):
"""Tests DataflowRunner conflict with local path."""
with self.assertRaises(AttributeError):
client._validate_runner(
self.test_df,
runner='DataflowRunner',
project=self.test_project,
region=self.test_region)
def test_gcs_path_with_dataflow_runner(self):
"""Tests DataflowRunner with GCS path."""
df2 = self.test_df.copy()
df2[constants.IMAGE_URI_KEY] = 'gs://' + df2[constants.IMAGE_URI_KEY]
self.assertIsNone(
client._validate_runner(
df2,
runner='DataflowRunner',
project=self.test_project,
region=self.test_region))
def test_gcs_path_with_dataflow_runner_missing_param(self):
"""Tests DataflowRunner with missing required parameter."""
df2 = self.test_df.copy()
df2[constants.IMAGE_URI_KEY] = 'gs://' + df2[constants.IMAGE_URI_KEY]
for p, r in [
(None, self.test_region), (self.test_project, None), (None, None)]:
with self.assertRaises(AttributeError) as context:
client._validate_runner(
df2,
runner='DataflowRunner',
project=p,
region=r)
self.assertTrue('DataflowRunner requires valid `project` and `region`'
in repr(context.exception))
def _make_csv_tempfile(data: List[List[str]]) -> tempfile.NamedTemporaryFile:
"""Returns `NamedTemporaryFile` representing an image CSV."""
f = tempfile.NamedTemporaryFile(mode='w+t', suffix='.csv')
writer = csv.writer(f, delimiter=',')
for row in data:
writer.writerow(row)
f.seek(0)
return f
def get_sample_image_csv_data() -> List[List[str]]:
"""Returns sample CSV data in Image CSV format."""
data = test_utils.get_test_data()
header = list(data.keys())
content = [list(row) for row in zip(*data.values())]
return [header] + content
class ReadCSVTest(unittest.TestCase):
"""Tests `read_csv`."""
def setUp(self):
data = get_sample_image_csv_data()
self.header = data.pop(0)
self.sample_data = data
def test_valid_csv_no_header_no_names_specified(self):
"""Tests a valid CSV without a header and no header names given."""
f = _make_csv_tempfile(self.sample_data)
actual = client.read_csv(f.name, header=None)
self.assertEqual(list(actual.columns), constants.IMAGE_CSV_COLUMNS)
self.assertEqual(actual.values.tolist(), self.sample_data)
def test_valid_csv_no_header_names_specified(self):
"""Tests valid CSV without a header, but header names are given."""
f = _make_csv_tempfile(self.sample_data)
actual = client.read_csv(f.name, header=None, names=self.header)
self.assertEqual(list(actual.columns), self.header)
self.assertEqual(actual.values.tolist(), self.sample_data)
def test_valid_csv_with_header_no_names_specified(self):
"""Tests valid CSV with header, and no header names given (inferred)."""
f = _make_csv_tempfile([self.header] + self.sample_data)
actual = client.read_csv(f.name)
self.assertEqual(list(actual.columns), self.header)
self.assertEqual(actual.values.tolist(), self.sample_data)
def test_valid_csv_with_header_names_specified(self):
"""Tests valid CSV with header, and header names given (override)."""
f = _make_csv_tempfile([self.header] + self.sample_data)
actual = client.read_csv(f.name, names=self.header, header=0)
self.assertEqual(list(actual.columns), self.header)
self.assertEqual(actual.values.tolist(), self.sample_data)
class ToDataFrameTest(unittest.TestCase):
"""Tests `to_dataframe`."""
def setUp(self) -> None:
sample_data = get_sample_image_csv_data()
columns = sample_data.pop(0)
self.input_df = pd.DataFrame(sample_data, columns=columns)
@mock.patch.object(client, 'read_csv', autospec=True)
def test_input_csv(self, read_csv):
"""Tests valid input CSV file."""
expected = self.input_df
read_csv.return_value = expected
f = _make_csv_tempfile(get_sample_image_csv_data())
actual = client.to_dataframe(f.name)
pd.testing.assert_frame_equal(actual, expected)
def test_input_dataframe_no_names_specified(self):
"""Tests valid input dataframe with no header names specified."""
actual = client.to_dataframe(self.input_df)
pd.testing.assert_frame_equal(actual, self.input_df)
def test_input_dataframe_with_header(self):
"""Tests valid input dataframe with header specified."""
names = list(self.input_df.columns[0:-1])
actual = client.to_dataframe(self.input_df, names=names)
| pd.testing.assert_frame_equal(actual, self.input_df[names]) | pandas.testing.assert_frame_equal |
from delphin_6_automation.database_interactions import general_interactions
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions.db_templates import delphin_entry
import os
import pandas as pd
mongo_setup.global_init(auth_dict)
folder = r'U:\RIBuild\2D_1D\Results'
simulated_id_folder = r'U:\RIBuild\2D_1D\Delphin Project\4A'
def download(sim_obj):
result_id = str(sim_obj.results_raw.id)
general_interactions.download_raw_result(result_id, folder)
delphin_interactions.download_delphin_entry(sim_obj, folder + f'/{result_id}')
excel_frame = pd.DataFrame(columns=['Simulation ID', 'Result ID', 'Dimension', 'Brick Type', 'Plaster Type',
'Insulation Type', 'Acronym'])
bricks1d = ['AltbauziegelDresdenZP', 'AltbauziegelDresdenZD', 'AltbauziegelRoteKasernePotsdamInnenziegel1',
'LimePlasterHist']
bricks2d = ['AltbauziegelDresdenZP', 'AltbauziegelDresdenZD', 'AltbauziegelRoteKasernePotsdamInnenziegel1']
plaster = ['LimeCementMortarHighCementRatio', 'LimeCementMortarLowCementRatio']
insulation = ['RemmersCalciumsilikatSchimmelsanierplatte2']
def create_acronym(dimension, brick, plaster, insulation):
acronym = ''
if brick == 'AltbauziegelDresdenZP':
acronym += 'dresden_zp'
elif brick == 'AltbauziegelDresdenZD':
acronym += 'dresden_zd'
elif brick == 'AltbauziegelRoteKasernePotsdamInnenziegel1':
acronym += 'potsdam'
elif brick == 'LimePlasterHist':
acronym += 'mortar'
if plaster == 'LimeCementMortarHighCementRatio':
acronym += '_high_cement'
elif plaster == 'LimeCementMortarLowCementRatio':
acronym += '_low_cement'
if insulation == 'RemmersCalciumsilikatSchimmelsanierplatte2':
acronym += '_insulated'
else:
acronym += '_uninsulated'
acronym += f'_36_4a_{dimension.lower()}'
return acronym
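# e.g. create_acronym('2D', 'AltbauziegelDresdenZP', 'LimeCementMortarHighCementRatio',
#                     'RemmersCalciumsilikatSchimmelsanierplatte2')
# returns 'dresden_zp_high_cement_insulated_36_4a_2d'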
simulation_id = []
result_id = []
dimensions = []
brick_type = []
plaster_type = []
insulation_type = []
acronyms = []
for file in os.listdir(simulated_id_folder):
if file.endswith('.txt'):
file_obj = open(simulated_id_folder + '/' + file)
lines = file_obj.readlines()
for line in lines:
entry = delphin_entry.Delphin.objects(id=line[:-1]).first()
if entry.simulated and not os.path.exists(folder + f'\{str(entry.results_raw.id)}'):
simulation_id.append(str(entry.id))
result_id.append(str(entry.results_raw.id))
dimensions.append(f"{entry.dimensions}D")
for mat in entry.materials:
if entry.dimensions == 1 and mat.material_name in bricks1d:
brick_type.append(mat.material_name)
elif mat.material_name in bricks2d:
brick_type.append(mat.material_name)
plaster_type.append(' '.join([mat.material_name
for mat in entry.materials
if mat.material_name in plaster]))
insulation_type.append(' '.join([mat.material_name
for mat in entry.materials
if mat.material_name in insulation]))
acronyms.append(create_acronym(dimensions[-1], brick_type[-1], plaster_type[-1], insulation_type[-1]))
print(f"Simulation ID: {str(entry.id)}, Result ID: {str(entry.results_raw.id)}, "
f"Materials: {' '.join([mat.material_name for mat in entry.materials])}, "
f"Dimension: {entry.dimensions}\n")
download(entry)
excel_frame['Simulation ID'] = simulation_id
excel_frame['Result ID'] = result_id
excel_frame['Dimension'] = dimensions
excel_frame['Plaster Type'] = plaster_type
excel_frame['Brick Type'] = brick_type
excel_frame['Insulation Type'] = insulation_type
excel_frame['Acronym'] = acronyms
writer = | pd.ExcelWriter(r'U:\RIBuild\2D_1D\4A_36_Acronyms.xlsx') | pandas.ExcelWriter |
import numpy as np
import pandas as pd
import fiona
import io
from shapely import geometry
import click
from wit_tooling import query_wit_data
def shape_list(key, values, shapefile):
"""
Get a generator of shapes from the given shapefile
key: the key to match in 'properties' in the shape file
values: a list of property values
shapefile: the name of your shape file
e.g. key='ORIGID', values=[1, 2, 3, 4, 5],
shapefile='/g/data/r78/DEA_Wetlands/shapefiles/MDB_ANAE_Aug2017_modified_2019_SB_3577.shp'
"""
count = len(values)
with fiona.open(shapefile) as allshapes:
for shape in allshapes:
shape_id = shape['properties'].get(key)
if shape_id is None:
continue
if isinstance(shape_id, float):
shape_id = int(shape_id)
if shape_id in values:
yield(shape_id, shape)
count -= 1
if count <= 0:
break
def get_areas(features, pkey='SYSID'):
"""
Calculate the area of a list/generator of shapes
input:
features: a list of shapes indexed by the key
output:
a dataframe of area index by the key
"""
re = pd.DataFrame()
for f in features:
va = pd.DataFrame([[f[0], geometry.shape(f[1]['geometry']).area/1e4]], columns=[pkey, 'area'])
re = re.append(va, sort=False)
return re.set_index(pkey)
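# e.g. get_areas(shape_list('SYSID', [1, 2, 3], shapefile)) returns a DataFrame of polygon
# areas indexed by SYSID; geometry.shape(...).area is in m^2, so /1e4 gives hectares
# (assuming a metre-based CRS such as EPSG:3577)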
def dump_wit_data(key, feature_list, output, batch=-1):
"""
dump wit data from the database into a file
input:
key: Name to id the polygon
feature_list: a list or generator of features
output:
a csv file to save all the wit data
"""
count = 0
if batch > 0:
fname = output.split('.')[0]
sub_fname = fname + '_0.csv'
appendix = 0
else:
sub_fname = output
for f_id, f in feature_list:
_, wit_data = query_wit_data(f)
csv_buf = io.StringIO()
wit_df = pd.DataFrame(data=wit_data, columns=['TIME', 'BS', 'NPV', 'PV', 'WET', 'WATER'])
wit_df.insert(0, key, f_id)
wit_df.to_csv(csv_buf, index=False, header=False)
csv_buf.seek(0)
with open(sub_fname, 'a') as f:
f.write(csv_buf.read())
if batch < 0:
continue
count += 1
if count >= batch:
with open(sub_fname, 'a') as f:
f.write(','.join(list(wit_df.columns)))
count = 0
appendix += 1
sub_fname = fname + '_' + str(appendix) + '.csv'
if count < batch or batch < 0:
with open(sub_fname, 'a') as f:
f.write(','.join(list(wit_df.columns)))
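# Example (illustrative paths and ids): dump WIT rows for a handful of polygons into one CSV
#
#   features = shape_list('SYSID', [101, 102], '/path/to/ANAE_3577.shp')
#   dump_wit_data('SYSID', features, 'wit_data.csv')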
def annual_metrics(wit_data, members=['PV', 'WET', 'WATER', 'BS', 'NPV', ['NPV', 'PV', 'WET'],
['PV', 'WET'], ['WATER', 'WET']], threshold=[25, 75], pkey='SYSID'):
"""
Compute the annual max, min, mean, count with given wit data, members and threshold
input:
wit_data: dataframe of WIT
members: the elements which the metrics are computed against, can be a column from wit_data, e.g. 'PV'
or the sum of wit columns, e.g. ['WATER', 'WET']
threshold: a list of thresholds such that (elements >= threshold[i]) is True,
where i = 0, 1...len(threshold)-1
output:
dataframe of metrics
"""
years = wit_data['TIME']
i = 0
wit_df = wit_data.copy(deep=True)
for m in members:
if isinstance(m, list):
wit_df.insert(wit_df.columns.size+i, '+'.join(m), wit_df[m].sum(axis=1))
years = pd.DatetimeIndex(wit_df['TIME']).year.unique()
shape_id_list = wit_df[pkey].unique()
# shane changed 4 to 5 to accommodate the median added below
wit_metrics = [pd.DataFrame()] * 5
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).max()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_max' for n in wit_yearly.columns[1:]})
wit_metrics[0] = wit_metrics[0].append(wit_yearly, sort=False)
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).min()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_min' for n in wit_yearly.columns[1:]})
wit_metrics[1] = wit_metrics[1].append(wit_yearly, sort=False)
for y in years:
wit_yearly = wit_df[pd.DatetimeIndex(wit_df['TIME']).year==y].drop(columns=['TIME']).groupby(pkey).mean()
wit_yearly.insert(0, 'YEAR', y)
wit_yearly = wit_yearly.rename(columns={n: n+'_mean' for n in wit_yearly.columns[1:]})
wit_metrics[2] = wit_metrics[2].append(wit_yearly, sort=False)
#*********************** START ADDED BY SHANE ***********************
#adding median
for y in years:
wit_yearly = wit_df[ | pd.DatetimeIndex(wit_df['TIME']) | pandas.DatetimeIndex |
"""Plots graphs of timings"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def main():
"""Saves plots of benchmarks to disk"""
io_df = pd.read_csv("benchmark_timings_iolimited.csv")
cpu_df = pd.read_csv("benchmark_timings_cpulimited.csv")
def plot(df, title):
"""plots graphs of timings"""
df["groupname"] = df.groupname.str.split("_benchmark", expand=True)[0]
sns.set(font_scale=1.30)
def barplot_err(x, y, xerr=None, yerr=None, data=None, **kwargs):
"""Plot a bar graph with hand defined symmetrical error bars"""
_data = []
for _i in data.index:
_data_i = | pd.concat([data.loc[_i:_i]] * 3, ignore_index=True, sort=False) | pandas.concat |
import pandas as pd
import os
import config
import models
from sqlalchemy.orm import sessionmaker
session = sessionmaker(bind=config.ENGINE)()
manual_themes = 'manual_themes'
cleaned_themes = 'cleaned_themes'
manual_territories = 'territories'
cleaned_territories = 'cleaned_territories'
comment_col = 'comment'
territory_col = 'territories'
territory_theme_type = 'МР/ГО'
def split_names(text):
if pd.isnull(text):
return []
names = text.replace(';', ',').replace(':', ',').split(',')
return names
def get_df_with_cleaned_names(df, from_col, to_col):
names = []
for idx, row in df.iterrows():
names.append(split_names(row[from_col]))
df[to_col] = names
return df
def manual_themes_to_database(df):
for idx, row in df.iterrows():
comment = session.query(models.Comment).filter(models.Comment.text == row[comment_col]).first()
if comment and (row[cleaned_themes] or row[cleaned_territories]):
for theme_name in row[cleaned_themes]:
theme = models.get_or_create(session, models.Theme, name=theme_name.strip())[0]
theme_comment = models.get_or_create(session, models.ThemeComment,
theme_id=theme.id, comment_id=comment.id)[0]
for territory in row[cleaned_territories]:
theme_type = models.get_or_create(session, models.ThemeType, name=territory_theme_type)[0]
theme = models.get_or_create(session, models.Theme, name=territory.strip(),
theme_type_id=theme_type.id)[0]
theme_comment = models.get_or_create(session, models.ThemeComment,
theme_id=theme.id, comment_id=comment.id)[0]
session.commit()
def get_dataframes_from_directory(directory='./'):
dataframes = []
for subdir, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.xlsx'):
df = pd.read_excel(file)
df_cleaned = get_df_with_cleaned_names(df, manual_themes, cleaned_themes)
df_cleaned = get_df_with_cleaned_names(df_cleaned, manual_territories, cleaned_territories)
dataframes.append(df_cleaned)
return dataframes
def get_dataframe_from_database():
sql = '''SELECT comment_id, theme_id, theme.name
FROM public.theme_comment
LEFT JOIN theme ON theme.id = theme_id'''
data = session.execute(sql).fetchall()
df = pd.DataFrame(data)
return df
if __name__ == '__main__':
dataframes = get_dataframes_from_directory()
df = | pd.concat([df[[cleaned_themes, cleaned_territories, comment_col]] for df in dataframes]) | pandas.concat |
# Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ..utils import NotFittedError, UnsupportedModelError
import numpy as np
import pandas as pd
import copy
from sklearn.cluster import KMeans
from probatus.utils import shap_helpers
def return_confusion_metric(y_true, y_score, normalize = False):
"""
Computes a confusion metric as the absolute difference between y_true and y_score.
If normalize is set to True, y_score is first normalized by the maximum value in the array
Args:
y_true: (np.ndarray or pd.Series) true targets
y_score: (np.ndarray or pd.Series) model output
normalize: boolean, normalize or not to the maximum value
Returns: (np.ndarray or pd.Series) confusion metric
"""
if normalize:
y_score = y_score/y_score.max()
return np.abs(y_true - y_score)
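# e.g. return_confusion_metric(np.array([0, 1, 1]), np.array([0.1, 0.4, 0.9]))
# -> array([0.1, 0.6, 0.1])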
class BaseInspector(object):
def __init__(self, algotype, **kwargs):
self.fitted = False
self.algotype = algotype
# TODO fix compilation issue for hdbscan
# if algotype =='dbscan':
# self.clusterer = hdbscan.HDBSCAN(prediction_data=True,**kwargs)
if algotype =='kmeans':
self.clusterer = KMeans(**kwargs)
else:
raise UnsupportedModelError("The algorithm {} is not supported".format(algotype))
def __repr__(self):
repr_ = "{},\n\t{}".format(self.__class__.__name__,self.algotype)
if self.fitted:
repr_ += "\n\tTotal clusters {}".format(np.unique(self.clusterer.labels_).shape[0])
return repr_
def fit_clusters(self, X):
"""
Perform the fit of the clusters with the algorithm specified in the constructor
Args:
X: input features
Returns: cluster labels
"""
self.clusterer.fit(X)
self.fitted = True
return self
def predict_clusters(self,X):
if not self.fitted:
raise NotFittedError("Inspector not fitted. Run .fit()")
labels = None
if self.algotype == 'kmeans':
labels = self.clusterer.predict(X)
if self.algotype == 'dbscan':
raise NotImplementedError("Implementation not finished (note the hdbscan package is not imported yet!)")
#labels, strengths = hdbscan.approximate_predict(self.clusterer, X)
return labels
@staticmethod
def assert_is_dataframe(df):
if isinstance(df,pd.DataFrame):
return df
elif isinstance(df,np.ndarray) and len(df.shape)==2:
return pd.DataFrame(df)
else:
raise NotImplementedError("Sorry, X needs to be a pd.DataFrame for for a 2 dimensional numpy array")
@staticmethod
def assert_is_series(series, index=None):
if isinstance(series, pd.Series):
return series
elif isinstance(series, pd.DataFrame) and series.shape[1] == 1:
return pd.Series(series.values.ravel(), index=series.index)
elif isinstance(series, np.ndarray) and len(series.shape) == 1 and index is not None:
return | pd.Series(series, index=index) | pandas.Series |
import time  # import the time module
import pandas as pd
import re
import sqlparse
attributeNameArray = ['tableName', 'createTime', 'lastModifyTime', 'owner', 'rowNumber', 'columnNumber',
'primaryKey', 'uniqueKey', 'foreignKey', 'notNullColumn', 'indexColumn', 'columnDataType']
remarksList = ['table name', 'creation time', 'last modified time', 'owner', 'number of rows', 'number of columns', 'primary key',
               'unique key', 'foreign key', 'NOT NULL columns', 'index columns', 'data types']
# Helper join function; called by str2TableClass
def myConcat(array: list, separator: str):
temp = ""
for i in range(0, len(array)):
temp += array[i] + separator
temp = temp[:-1]
return temp
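# Illustrative usage (not in the original source): myConcat(["id", "name", "age"], ",") returns "id,name,age"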
# Parses the incoming "create table" statement with regular expressions and extracts its parts; called by tableInit
def str2TableClass(tempStr: str, tableName: str):
    tempStr = re.search(r"[(](.*)[)]", tempStr).group(1)  # grab the content inside the outer parentheses
primaryKey = ""
uniqueKey = ""
foreignKey = ""
    # primary key part
    p1 = re.search(r"primary key(.*?)[(](.*?)[)]", tempStr)
    # print(p1.group(0))
    # print(p1.group(2) + " primary key value")
if p1 is not None:
primaryKey = p1.group(2).strip()
primaryKeyList = primaryKey.split(",")
for index, ele in enumerate(primaryKeyList):
primaryKeyList[index] = ele.strip()
primaryKey = myConcat(primaryKeyList, ",")
        tempStr = re.sub(r"primary key(.*?)[(](.*?)[)]", "", tempStr)  # remove the primary key clause so it does not affect later parsing
    # unique key part
    p2 = re.search(r"unique key(.*?)[(](.*?)[)]", tempStr)
    # print(p2.group(0))
    # print(p2.group(2) + " unique key value")
if p2 is not None:
uniqueKey = p2.group(2)
tempStr = re.sub(r"unique key(.*?)[(](.*?)[)]", "", tempStr)
    # foreign key part; note this actually has a bug: there can be several foreign keys, but re.search only finds one
    p3 = re.search(r"foreign key(.*?)[(](.*?)[)](.*?)references(.*?)[(](.*?)[)]", tempStr)
    # print(p2.group(0))
    # print(p2.group(2) + " columns in the current table")
    # print(p2.group(4).strip() + " referenced table name")
    # print(p2.group(5).strip() + " key in the referenced table")
if p3 is not None:
foreignKey = p3.group(2) + "|" + p3.group(4).strip() + "|" + p3.group(5).strip()
tempStr = re.sub(r"foreign key(.*?)[(](.*?)[)](.*?)references(.*?)[(](.*?)[)]", "", tempStr)
    # split what is left; every element is now something like "school varchar not null", "age int", or pure whitespace
    array = tempStr.split(",")
    tempArray = []  # temporarily holds the stripped fragments, e.g. "school varchar not null"
    columnCount = 0  # counts how many columns there are, since some fragments are pure whitespace
    for ele in array:
        if not ele.isspace():  # built-in check: True when the string is all whitespace
            columnCount += 1  # count the column
            tempArray.append(ele.strip())  # strip leading/trailing whitespace
    columnNameArray = []  # column names
    columnDataTypeArray = []  # column data types
    notNullColumn = []  # columns declared NOT NULL
for ele in tempArray:
p = re.search(r"(.*?)not( +)null", ele)
if p is None:
arrayAA = re.split(r" +", ele.strip())
else:
arrayAA = re.split(r" +", p.group(1).strip())
notNullColumn.append(arrayAA[0])
        # append the extracted column name and data type
columnNameArray.append(arrayAA[0])
columnDataTypeArray.append(arrayAA[1])
uniqueKeyList = uniqueKey.strip().split(",")
uniqueKey = myConcat(uniqueKeyList, ",")
    # myConcat joins the NOT NULL columns into a string such as "school,home"
    notNullColumnStr = myConcat(notNullColumn, ",")
    notNullColumnStr += "," + primaryKey + "," + uniqueKey  # primary/unique key columns cannot be NULL either
    # build a string like "id#int,name#varchar,age#int,school#varchar,home#varchar,aad#varchar"
    # column name first, then data type, separated by "#"; different columns separated by ","
temp = ""
for i in range(0, len(columnNameArray)):
temp += columnNameArray[i] + "#" + columnDataTypeArray[i] + ","
columnDataTypeArrayStr = temp[:-1]
    # build a Table object, which is convenient to pass around
print(tempStr)
tableTemp = Table(tableName=tableName,
createTime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
lastModifyTime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
owner="root", rowNumber=0, columnNumber=columnCount,
primaryKey=primaryKey, uniqueKey=uniqueKey, foreignKey=foreignKey,
notNullColumn=notNullColumnStr, indexColumn="", columnDataType=columnDataTypeArrayStr)
    # store some extra info on the object; it is used again later
tableTemp.columnNameArray = columnNameArray
tableTemp.columnDataTypeArray = columnDataTypeArray
return tableTemp
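# Illustrative example (not in the original source): an input such as
#   str2TableClass("create table student (sno int, sname varchar not null, primary key (sno))", "student")
# would return a Table whose primaryKey is "sno" and whose columnDataType string is "sno#int,sname#varchar".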
# Initializes a table: extracts the metadata and writes it into an Excel workbook
def tableInit(databaseLocation: str, databaseName: str, currentIndex: int, tokens):
for index in range(currentIndex, len(tokens)):
while str(tokens[index].ttype) != "None":
index += 1
tableName = str(tokens[index].tokens[0])
tempStr = str(tokens[index])
break
    # use an ExcelWriter so nothing gets overwritten and both worksheets can be written to the same file
src = databaseLocation + "\\" + databaseName.upper() + "\\" + tableName + ".xlsx"
writer = pd.ExcelWriter(src, engine='openpyxl')
initTableAttributeObject = str2TableClass(tempStr, tableName)
    tempArray = list(range(1, len(attributeNameArray) + 1))  # needed for the index column
    s1 = pd.Series(tempArray, index=tempArray, name="index")  # index column; 12 attributes in total
    s2 = pd.Series(attributeNameArray, index=tempArray, name="attribute")  # attribute name column
    s3 = pd.Series(initTableAttributeObject.toArray(), index=tempArray, name="value")  # the tricky one: note the call to Table.toArray()
    s4 = pd.Series(remarksList, index=tempArray, name="remarks")  # remarks column; hard-coded
    attributeDf = pd.DataFrame({s1.name: s1, s2.name: s2, s3.name: s3, s4.name: s4})  # insert the 4 columns
    attributeDf = attributeDf.set_index("index")  # set the index
    dataDf = pd.DataFrame(columns=initTableAttributeObject.columnNameArray)
    # write the content back to the Excel workbook
attributeDf.to_excel(writer, sheet_name="attribute")
dataDf.to_excel(writer, sheet_name="data", index=False)
writer.save()
writer.close()
    return tableName  # return the name of the created table
def checkSafety(attributeDf, dataDf, aa: list, dic):
primaryKeyList: list = attributeDf["value"].at[6].strip().split(",")
uniqueKeyList: list = attributeDf["value"].at[7].strip().split(",")
notNullStrArray: list = attributeDf["value"].at[9].strip().split(",")
error: str = ""
    # check the NOT NULL constraints and the primary key
    # print(notNullStrArray)
    for ele in notNullStrArray:
        if ele not in aa:
            # print("Column " + ele + " cannot be null, insert failed")
            return "Column " + ele + " cannot be null, insert failed"
    # the primary key must not be duplicated
for ele in primaryKeyList:
dataDf = dataDf.loc[dataDf[ele].apply(lambda xx: str(xx) == dic[ele])]
# print(dataDf)
if dataDf.empty is False:
            # print("Duplicate primary key, please retry")
            return "Duplicate primary key, please retry"
    return error
    # unique keys must not be duplicated
# for ele in uniqueKeyList:
# temp = dataDf.loc[dataDf[ele].apply(lambda xx: str(xx) == dic[ele])]
# After the integrity checks pass, writes the data into the Excel file; called by tableInsert
def judgeAndInsert(src: str, aa: list, bb: list, all: list):
    # note: this path is relative to main.py, not to this file (Table.py)
    # print(aa)
    # print(bb)
    # aa is the list of columns to insert into, bb holds the values
writer = pd.ExcelWriter(src)
dic = {}
for index, ele in enumerate(bb):
dic[aa[index]] = ele
attributeDf = pd.read_excel(writer, sheet_name="attribute")
# print(attributeDf)
dataDf = pd.read_excel(writer, sheet_name="data", usecols=all)
# print(dataDf)
error = checkSafety(attributeDf, dataDf, aa, dic)
if error != "":
print(error)
return
dataDf = dataDf.append(dic, ignore_index=True)
    attributeDf["value"].at[2] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # update the last-modified time
    attributeDf["value"].at[4] += 1  # increment the row count
attributeDf.to_excel(writer, sheet_name="attribute", index=False)
dataDf.to_excel(writer, sheet_name="data", index=False)
writer.save()
writer.close()
    print("Insert succeeded")
# Extracts the parts of a condition, e.g. for "id > 20": key is "id", algebraicSymbol is ">", value is "20"
def getDataframeByRequirement(key, value, algebraicSymbol, dataframe: pd.DataFrame):
#print(key)
#print(value)
#print(algebraicSymbol)
tempDataFrame = None
if algebraicSymbol == ">":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx > int(value))]
if algebraicSymbol == ">=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx >= int(value))]
if algebraicSymbol == "<":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx < int(value))]
if algebraicSymbol == "<=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: xx <= int(value))]
if algebraicSymbol == "=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: str(xx) == str(value))]
if algebraicSymbol == "!=":
tempDataFrame = dataframe.loc[dataframe[key].apply(lambda xx: str(xx) != str(value))]
return tempDataFrame
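# Illustrative usage (not in the original source):
#   getDataframeByRequirement("sno", "20", ">", df) keeps the rows of df where df["sno"] > 20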
# Parses an expression into a list tempList = [key, value, algebraicSymbol]
def getKeyValueAndAlgebraicSymbol(expression: str):
key = ""
value = ""
algebraicSymbol = ""
if "=" in expression:
equalIndex = expression.index("=")
if expression[equalIndex - 1] == "!":
algebraicSymbol = "!="
elif expression[equalIndex - 1] == ">":
algebraicSymbol = ">="
elif expression[equalIndex - 1] == "<":
algebraicSymbol = "<="
else:
algebraicSymbol = "="
else:
if ">" in expression:
algebraicSymbol = ">"
elif "<" in expression:
algebraicSymbol = "<"
key = (expression.split(algebraicSymbol))[0].strip()
value = (expression.split(algebraicSymbol))[1].strip()
tempList = [key, value, algebraicSymbol]
return tempList
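# Illustrative usage (not in the original source):
#   getKeyValueAndAlgebraicSymbol("sno >= 10") returns ["sno", "10", ">="]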
# Returns the dataframe rows matching the where condition
def parseWhereGetDf(src: str, whereStr: str):
dataDf = pd.read_excel(src, sheet_name="data")
# strTemp3 = "sno< 20 and sno > 5 and sno >=10 and sno > 17 or sno < 12"
# strTemp4 = "sno > 17 or sno < 12 "
noOrDataDf = dataDf
if whereStr == "":
# print(dataDf)
return dataDf
else:
andSplitStrArray = re.split(r" and ", whereStr)
orList = []
for ele in andSplitStrArray:
if " or " in ele:
orSplitStrArray = re.split(r" or ", ele)
orDfList = []
                # evaluate every expression inside the or-group, then combine them
for factor in orSplitStrArray:
tempArray = getKeyValueAndAlgebraicSymbol(factor)
OrDataDf = getDataframeByRequirement(tempArray[0], tempArray[1], tempArray[2], dataDf)
orDfList.append(OrDataDf)
oneTempOrDf = orDfList[0]
                # take the union of all the or-separated expressions
                for element in orDfList:
                    oneTempOrDf = pd.merge(oneTempOrDf, element, how="outer")  # outer merge = union
orList.append(oneTempOrDf)
else:
tempArray = getKeyValueAndAlgebraicSymbol(ele)
key = tempArray[0]
value = tempArray[1]
algebraicSymbol = tempArray[2]
noOrDataDf = getDataframeByRequirement(key, value, algebraicSymbol, noOrDataDf)
finallyDf = noOrDataDf
    # For example: "sno < 20 and sno > 5 and sno >= 10 and sno > 17 or sno < 12 and sno > 17 or sno < 12"
    # orList then holds 2 dataframes, and the loop below intersects them with the and-only result
for ele in orList:
finallyDf = | pd.merge(finallyDf, ele, how="inner") | pandas.merge |
import pandas as pd
import numpy as np
df=pd.read_csv("Train.csv")
df_test= | pd.read_csv("Test.csv") | pandas.read_csv |
import arff
import copy
import json
import logging
import math
import os
import pandas as pd
import warnings
from functools import wraps
from a2ml.api.utils import fsclient, get_uid, get_uid4, remove_dups_from_list, process_arff_line, download_file, retry_helper, parse_url
from a2ml.api.utils.local_fsclient import LocalFSClient
# To avoid warnings for inplace operation on datasets
pd.options.mode.chained_assignment = None
class DataFrame(object):
BOOLEAN_WORDS_TRUE = ['yes', 'on']
BOOLEAN_WORDS_FALSE = ['no', 'off']
def __init__(self, options):
self.options = options
self.categoricals = {}
self.transforms_log = [[],[],[],[]]
self.df = None
self.dataset_name = None
self.loaded_columns = None
self.from_pandas = False
def _get_compression(self, extension):
compression = self.options.get('data_compression', 'infer')
if extension.endswith('.gz') or extension.endswith('.gzip'):
compression = 'gzip'
elif extension.endswith('.bz2'):
compression = 'bz2'
elif extension.endswith('.zip'):
compression = 'zip'
elif extension.endswith('.xz'):
compression = 'xz'
return compression
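    # Illustrative usage (not in the original source): with the default
    # 'data_compression' of 'infer', self._get_compression("data.csv.gz")
    # returns 'gzip', while self._get_compression("data.csv") returns 'infer'.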
@staticmethod
def create_dataframe(data_path=None, records=None, features=None, reset_index=False):
if data_path is not None:
if isinstance(data_path, pd.DataFrame):
ds = DataFrame({})
ds.df = data_path
elif isinstance(data_path, DataFrame):
ds = data_path
elif isinstance(data_path, list):
ds = DataFrame({})
ds.load_records(data_path)
elif isinstance(data_path, dict):
ds = DataFrame({})
if 'data' in data_path and 'columns' in data_path:
ds.load_records(data_path['data'], features=data_path['columns'])
else:
ds.load_records(data_path)
else:
ds = DataFrame({'data_path': data_path})
ds.load(features = features)
else:
ds = DataFrame({})
ds.load_records(records, features=features)
if reset_index and ds.df is not None:
ds.df.reset_index(drop=True, inplace=True)
# if data_path:
# ds = DataFrame({'data_path': data_path})
# ds.load(features = features)
# elif records is not None and isinstance(records, pd.DataFrame):
# ds = DataFrame({})
# ds.df = records
# if features:
# ds.df = ds.df[features]
# ds.from_pandas = True
# else:
# ds = DataFrame({})
# ds.load_records(records, features=features)
return ds
@staticmethod
def load_from_files(files, features=None):
for file in files:
path = file if type(file) == str else file['path']
fsclient.wait_for_file(path, True)
try:
df = retry_helper(lambda: DataFrame.create_dataframe(path, None, features))
yield (file, df)
except Exception as exc:
logging.exception("load_from_files failed for: %s. Error: %s"%(path, exc))
@staticmethod
def is_dataframe(data):
return isinstance(data, pd.DataFrame) or isinstance(data, DataFrame)
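    # Illustrative usage (not in the original source):
    #   DataFrame.is_dataframe(pd.DataFrame({"a": [1]}))  -> True
    #   DataFrame.is_dataframe([1, 2, 3])                 -> False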
def load_from_file(self, path, features=None, nrows=None):
from collections import OrderedDict
extension = path
if self.options.get('data_extension', 'infer') != 'infer':
extension = self.options['data_extension']
if self.options.get('content_type') == 'multipart':
fsclient.merge_folder_files(path)
if extension.endswith('.arff') or extension.endswith('.arff.gz'):
arffFile = None
class ArffFile:
def __init__(self, file):
self.file = file
self.date_attrs = {}
def __iter__(self):
return self
def __next__(self):
line = process_arff_line(next(self.file), self.date_attrs)
return line
try:
with fsclient.open(path, 'r') as f:
arffFile = ArffFile(f)
arff_data = arff.load(arffFile, return_type=arff.COO)
convert_arff = DataFrame._convert_arff_coo
except arff.BadLayout:
with fsclient.open(path, 'r') as f:
arffFile = ArffFile(f)
arff_data = arff.load(arffFile, return_type=arff.DENSE)
convert_arff = DataFrame._convert_arff_dense
columns = [a[0] for a in arff_data['attributes']]
series = convert_arff(features, columns, arff_data['data'])
res = pd.DataFrame.from_dict(OrderedDict(
(c, s) for c, s in zip(columns, series) if s is not None
))
for date_field, fmt in arffFile.date_attrs.items():
res[date_field] = | pd.to_datetime(res[date_field], infer_datetime_format=True, errors='ignore', utc=True) | pandas.to_datetime |
import os
import sys
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
import happybase
from mrjob.job import MRJob
from mrjob.protocol import PickleProtocol
# # mongo clients libs
from pymongo import MongoClient, ASCENDING, DESCENDING
# # Generic imports
import glob
import pandas as pd
from json import load
from datetime import datetime
import ast
import pickle
from module_edinet._model_functions.gam_functions import set_r_environment, train_gaussian_mixture_model, prepare_dataframe, train_linear, clean_linear, predict_gaussian_mixture_model, predict_model
import numpy as np
import ast
import zlib
def calculate_frequency(dataset):
if len(dataset.index) > 1:
return (pd.Series(dataset.index[1:]) - pd.Series(dataset.index[:-1])).value_counts().index[0]
else:
return None
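# Illustrative example (not in the original source): for a dataset indexed at a
# regular hourly frequency, calculate_frequency returns pd.Timedelta('1 hours'),
# i.e. the most common gap between consecutive timestamps.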
class MRJob_align(MRJob):
INTERNAL_PROTOCOL = PickleProtocol
def mapper_init(self):
fn = glob.glob('*.json')
self.config = load(open(fn[0]))
self.mongo = MongoClient(self.config['mongodb']['host'], self.config['mongodb']['port'])
self.mongo[self.config['mongodb']['db']].authenticate(
self.config['mongodb']['username'],
self.config['mongodb']['password']
)
self.devices = self.config['devices']
def reducer_init(self):
# recover json configuration uploaded with script
fn = glob.glob('*.json')
self.config = load(open(fn[0]))
r_file = NamedTemporaryFile(delete=False, suffix='.R')
        sys.stderr.write(os.popen('whoami').read())
with open('_model_functions/gam_functions.R', 'r') as rcode:
r_file.write(bytes(rcode.read(), encoding="utf8"))
set_r_environment(r_file.name)
os.unlink(r_file.name)
self.company = self.config['company']
self.devices = self.config['devices']
def mapper(self, _, doc):
# emits modelling_units as key
# emits deviceId, consumption, ts
columns = [(x[0], x[1]) for x in self.config['hive']['final_table_fields']]
ret = doc.split('\t')
try:
modelling_units = self.devices[ret[0]]
except:
return
d = {}
for i, c in enumerate(columns):
if c[0] == "ts":
d[c[0]] = datetime.fromtimestamp(float(ret[i]))
elif c[1] == "float":
try:
d[c[0]] = float(ret[i])
except:
d[c[0]] = np.NaN
else:
d[c[0]] = ret[i]
for modelling_unit in modelling_units:
yield modelling_unit, d
def reducer(self, key, values):
sys.stderr.write("calculating prediction\n")
# obtain the needed info from the key
modelling_unit, multipliers, lat, lon, timezone = key.split('~')
lat = float(lat)
lon = float(lon)
multipliers = ast.literal_eval(multipliers) # string to dict
multiplier = {}
for i in multipliers:
multiplier[i['deviceId']] = i['multiplier']
columns = [x[0] for x in self.config['hive']['final_table_fields']]
df = pd.DataFrame.from_records(values, index='ts', columns=columns)
energy_type = df.energyType.unique()[0]
grouped = df.groupby('deviceId')
df_new_hourly = None
for device, data in grouped:
if device not in multiplier.keys():
continue
data = data[~data.index.duplicated(keep='last')]
data = data.sort_index()
if df_new_hourly is None:
df_new_hourly = data[['value']] * multiplier[device]
else:
df_new_hourly += data[['value']] * multiplier[device]
weather = df.drop(['value', 'energyType', 'deviceId'], axis=1)
weather = weather[~weather.index.duplicated(keep='last')]
df_new_hourly = df_new_hourly.join(weather)
df_new_hourly = df_new_hourly[self.config['module_config']['model_features']]
df_new_hourly = df_new_hourly.sort_index()
df_value = df_new_hourly[['value']].resample('H').sum()
df_weather = df_new_hourly[["temperature", "windSpeed", "GHI", "windBearing"]].resample('H').max()
df_new_hourly = df_value.join(df_weather)
sys.stderr.write("dataframe has been recovered\n")
freq = calculate_frequency(df_new_hourly)
whole_day_index = len(np.arange( | pd.Timedelta('1 days') | pandas.Timedelta |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = | pd.to_numeric(idx) | pandas.to_numeric |
# Predict_Gesture_Twitch.py
# Description: Receives data from the ESP32 micro via the AGRB-Training-Data-Capture.ino sketch, makes a gesture prediction and sends it to Twitch chat
# Written by: <NAME>
# Created on July 13th 2020
import numpy as np
import pandas as pd
import datetime
import re
import os, os.path
import time
import random
import tensorflow as tf
import serial
import socket
import cfg
PORT = "/dev/ttyUSB0"
#PORT = "/dev/ttyUSB1"
#PORT = "COM8"
serialport = None
serialport = serial.Serial(PORT, 115200, timeout=0.05)
#load Model
model = tf.keras.models.load_model('../Model/cnn_model.h5')
#Creating our socket and passing on info for twitch
sock = socket.socket()
sock.connect((cfg.HOST,cfg.PORT))
sock.send("PASS {}\r\n".format(cfg.PASS).encode("utf-8"))
sock.send("NICK {}\r\n".format(cfg.NICK).encode("utf-8"))
sock.send("JOIN {}\r\n".format(cfg.CHAN).encode("utf-8"))
sock.setblocking(0)
#handling of some of the string characters in the twitch message
chat_message = re.compile(r"^:\w+!\w+@\w+.tmi.twitch.tv PRIVMSG #\w+ :")
#Let's create a helper that makes chatting easier: it takes the socket and the message and sends them with the proper formatting for Twitch messages.
def chat(s,msg):
s.send("PRIVMSG {} :{}\r\n".format(cfg.CHAN,msg).encode("utf-8"))
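# Illustrative usage (not in the original source): chat(sock, "Hello chat!") sends
# "PRIVMSG <channel> :Hello chat!\r\n" to the configured Twitch channel.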
#The next two functions take a Twitch message received from the socket and parse out the message text and the user who typed it.
def getMSG(r):
mgs = chat_message.sub("", r)
return mgs
def getUSER(r):
try:
user=re.search(r"\w+",r).group(0)
except AttributeError:
user ="tvheadbot"
print(AttributeError)
return user
#Get data from the IMU. Waits for incoming data and for the data to stop
def get_imu_data():
global serialport
if not serialport:
# open serial port
serialport = serial.Serial(PORT, 115200, timeout=0.05)
# check which port was really used
print("Opened", serialport.name)
# Flush input
time.sleep(3)
serialport.readline()
# Poll the serial port
line = str(serialport.readline(),'utf-8')
if not line:
return None
vals = line.replace("Uni:", "").strip().split(',')
if len(vals) != 7:
return None
try:
vals = [float(i) for i in vals]
    except ValueError:
        # a malformed sample cannot be parsed; treat it like missing data
        return None
return vals
# Create Reshape function for each row of the dataset
def reshape_function(data):
reshaped_data = tf.reshape(data, [-1, 3, 1])
return reshaped_data
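# Illustrative note (not in the original source): tf.reshape(data, [-1, 3, 1]) turns a
# flat tensor of, say, 2280 accelerometer readings into shape (760, 3, 1),
# i.e. 760 samples of 3-axis data with a single channel.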
# header for the incoming data
header = ["deltaTime","Acc_X","Acc_Y","Acc_Z","Gyro_X","Gyro_Y","Gyro_Z"]
#Create a way to check the length of the incoming data, which needs to be 760 points. Used for testing incoming data
def dataFrameLenTest(data):
df= | pd.DataFrame(data,columns=header) | pandas.DataFrame |
from __future__ import unicode_literals
import copy
import io
import itertools
import json
import os
import shutil
import string
import sys
from collections import OrderedDict
from future.utils import iteritems
from unittest import TestCase
import pandas as pd
import pytest
from backports.tempfile import TemporaryDirectory
from tempfile import NamedTemporaryFile
from hypothesis import (
given,
HealthCheck,
reproduce_failure,
settings,
)
from hypothesis.strategies import (
dictionaries,
integers,
floats,
just,
lists,
text,
tuples,
)
from mock import patch, Mock
from oasislmf.model_preparation.manager import OasisManager as om
from oasislmf.model_preparation.pipeline import OasisFilesPipeline as ofp
from oasislmf.models.model import OasisModel
from oasislmf.utils.exceptions import OasisException
from oasislmf.utils.fm import (
unified_canonical_fm_profile_by_level_and_term_group,
)
from oasislmf.utils.metadata import (
OASIS_COVERAGE_TYPES,
OASIS_FM_LEVELS,
OASIS_KEYS_STATUS,
OASIS_PERILS,
OED_COVERAGE_TYPES,
OED_PERILS,
)
from ..models.fakes import fake_model
from ..data import (
canonical_accounts,
canonical_accounts_profile,
canonical_exposure,
canonical_exposure_profile,
canonical_oed_accounts,
canonical_oed_accounts_profile,
canonical_oed_exposure,
canonical_oed_exposure_profile,
fm_input_items,
gul_input_items,
keys,
oasis_fm_agg_profile,
oed_fm_agg_profile,
write_canonical_files,
write_canonical_oed_files,
write_keys_files,
)
class AddModel(TestCase):
def test_models_is_empty___model_is_added_to_model_dict(self):
model = fake_model('supplier', 'model', 'version')
manager = om()
manager.add_model(model)
self.assertEqual({model.key: model}, manager.models)
def test_manager_already_contains_a_model_with_the_given_key___model_is_replaced_in_models_dict(self):
first = fake_model('supplier', 'model', 'version')
second = fake_model('supplier', 'model', 'version')
manager = om(oasis_models=[first])
manager.add_model(second)
self.assertIs(second, manager.models[second.key])
def test_manager_already_contains_a_diferent_model___model_is_added_to_dict(self):
first = fake_model('first', 'model', 'version')
second = fake_model('second', 'model', 'version')
manager = om(oasis_models=[first])
manager.add_model(second)
self.assertEqual({
first.key: first,
second.key: second,
}, manager.models)
class DeleteModels(TestCase):
def test_models_is_not_in_manager___no_model_is_removed(self):
manager = om([
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
])
expected = manager.models
manager.delete_models([fake_model('supplier3', 'model3', 'version3')])
self.assertEqual(expected, manager.models)
def test_models_exist_in_manager___models_are_removed(self):
models = [
fake_model('supplier', 'model', 'version'),
fake_model('supplier2', 'model2', 'version2'),
fake_model('supplier3', 'model3', 'version3'),
]
manager = om(models)
manager.delete_models(models[1:])
self.assertEqual({models[0].key: models[0]}, manager.models)
class GetCanonicalExposureProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_canonical_exposure_profile()
self.assertEqual(None, profile)
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_exposure_profile_json': json.dumps(expected)})
profile = om().get_canonical_exposure_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposure_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_exposure_profile_json': json.dumps(model_profile)})
profile = om().get_canonical_exposure_profile(oasis_model=model, canonical_exposure_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposure_profile'])
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_exposure_profile_path': f.name})
profile = om().get_canonical_exposure_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_exposure_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
@settings(suppress_health_check=[HealthCheck.too_slow])
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_exposure_profile_path': model_file.name})
profile = om().get_canonical_exposure_profile(oasis_model=model, canonical_exposure_profile_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_exposure_profile'])
class CreateModel(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_error_file_path',
model_exposure_file_path='model_exposure_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_supplier_and_model_and_version_only_are_supplied___correct_model_is_returned(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposure_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposure_file_path=exposure_file_path)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposure_fp=exposure_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version=text(alphabet=string.ascii_letters, min_size=1),
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(alphabet=string.ascii_letters, min_size=1),
model_keys_errors_fp=text(alphabet=string.ascii_letters, min_size=1),
model_exposure_fp=text(alphabet=string.ascii_letters, min_size=1),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(alphabet=string.ascii_letters, min_size=1),
keys_errors_fp=text(alphabet=string.ascii_letters, min_size=1),
exposure_fp=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposure_fp,
lookup,
keys_fp,
keys_errors_fp,
exposure_fp
):
resources={
'lookup': model_lookup,
'keys_file_path': model_keys_fp,
'keys_errors_file_path': model_keys_errors_fp,
'model_exposure_file_path': model_exposure_fp
}
model = om().create_model(supplier_id, model_id, version, resources=resources)
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(
oasis_model=model,
lookup=lookup,
model_exposure_file_path=exposure_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposure_fp=exposure_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources, model.resources)
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_relative_oasis_files_path_only_are_supplied___correct_model_is_returned_with_absolute_oasis_file_path(
self,
supplier_id,
model_id,
version_id,
oasis_files_path
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
oasis_files_path = oasis_files_path.lstrip(os.path.sep)
resources={'oasis_files_path': oasis_files_path}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertTrue(os.path.isabs(model.resources['oasis_files_path']))
self.assertEqual(os.path.abspath(resources['oasis_files_path']), model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_relative_oasis_files_path_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'oasis_files_path': oasis_files_path, 'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertTrue(os.path.isabs(model.resources['oasis_files_path']))
self.assertEqual(os.path.abspath(resources['oasis_files_path']), model.resources['oasis_files_path'])
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_and_canonical_exposure_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'oasis_files_path': os.path.abspath(oasis_files_path), 'canonical_exposure_profile': canonical_exposure_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['oasis_files_path'], model.resources['oasis_files_path'])
self.assertEqual(resources['canonical_exposure_profile'], model.resources['canonical_exposure_profile'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1)
)
def test_supplier_and_model_and_version_and_source_accounts_file_path_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
source_accounts_file_path
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'source_accounts_file_path': source_accounts_file_path}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
self.assertIsNone(model.resources.get('canonical_accounts_profile'))
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={'source_accounts_file_path': source_accounts_file_path, 'canonical_accounts_profile': canonical_accounts_profile}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertIsNone(model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1)),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_canonical_exposure_profile_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
canonical_exposure_profile,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={
'canonical_exposure_profile': canonical_exposure_profile,
'source_accounts_file_path': source_accounts_file_path,
'canonical_accounts_profile': canonical_accounts_profile
}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
expected_oasis_files_path = os.path.abspath(os.path.join('Files', expected_key.replace('/', '-')))
self.assertEqual(expected_oasis_files_path, model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['canonical_exposure_profile'], model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
@given(
supplier_id=text(alphabet=string.ascii_letters, min_size=1),
model_id=text(alphabet=string.ascii_letters, min_size=1),
version_id=text(alphabet=string.ascii_letters, min_size=1),
oasis_files_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_exposure_profile=dictionaries(text(min_size=1), text(min_size=1)),
source_accounts_file_path=text(alphabet=string.ascii_letters, min_size=1),
canonical_accounts_profile=dictionaries(text(min_size=1), text(min_size=1))
)
def test_supplier_and_model_and_version_and_absolute_oasis_files_path_and_canonical_exposure_profile_and_source_accounts_file_path_and_canonical_accounts_profile_only_are_supplied___correct_model_is_returned(
self,
supplier_id,
model_id,
version_id,
oasis_files_path,
canonical_exposure_profile,
source_accounts_file_path,
canonical_accounts_profile
):
expected_key = '{}/{}/{}'.format(supplier_id, model_id, version_id)
resources={
'oasis_files_path': os.path.abspath(oasis_files_path),
'canonical_exposure_profile': canonical_exposure_profile,
'source_accounts_file_path': source_accounts_file_path,
'canonical_accounts_profile': canonical_accounts_profile
}
model = om().create_model(supplier_id, model_id, version_id, resources=resources)
self.assertTrue(isinstance(model, OasisModel))
self.assertEqual(expected_key, model.key)
self.assertEqual(resources['oasis_files_path'], model.resources['oasis_files_path'])
self.assertTrue(isinstance(model.resources['oasis_files_pipeline'], ofp))
self.assertEqual(resources['canonical_exposure_profile'], model.resources.get('canonical_exposure_profile'))
self.assertEqual(resources['source_accounts_file_path'], model.resources['source_accounts_file_path'])
self.assertEqual(resources['canonical_accounts_profile'], model.resources['canonical_accounts_profile'])
class LoadCanonicalAccountsProfile(TestCase):
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_canonical_accounts_profile()
self.assertEqual(None, profile)
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected):
model = fake_model(resources={'canonical_accounts_profile_json': json.dumps(expected)})
profile = om().get_canonical_accounts_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_accounts_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
model = fake_model(resources={'canonical_accounts_profile_json': json.dumps(model_profile)})
profile = om().get_canonical_accounts_profile(oasis_model=model, canonical_accounts_profile_json=json.dumps(kwargs_profile))
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_accounts_profile'])
@given(expected=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected):
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'canonical_accounts_profile_path': f.name})
profile = om().get_canonical_accounts_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['canonical_accounts_profile'])
@given(model_profile=dictionaries(text(), text()), kwargs_profile=dictionaries(text(), text()))
def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used(
self,
model_profile,
kwargs_profile
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(model_profile, model_file)
model_file.flush()
json.dump(kwargs_profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'canonical_accounts_profile_path': model_file.name})
profile = om().get_canonical_accounts_profile(oasis_model=model, canonical_accounts_profile_path=kwargs_file.name)
self.assertEqual(kwargs_profile, profile)
self.assertEqual(kwargs_profile, model.resources['canonical_accounts_profile'])
class GetFmAggregationProfile(TestCase):
def setUp(self):
self.profile = oasis_fm_agg_profile
def test_model_and_kwargs_are_not_set___result_is_null(self):
profile = om().get_fm_aggregation_profile()
self.assertEqual(None, profile)
def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self):
expected = self.profile
profile_json = json.dumps(self.profile)
model = fake_model(resources={'fm_agg_profile_json': profile_json})
profile = om().get_fm_aggregation_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used(self):
model = fake_model(resources={'fm_agg_profile_json': json.dumps(self.profile)})
profile = om().get_fm_aggregation_profile(oasis_model=model, fm_agg_profile_json=json.dumps(self.profile))
self.assertEqual(self.profile, profile)
self.assertEqual(self.profile, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_path___models_profile_is_set_to_expected_json(self):
expected = self.profile
with NamedTemporaryFile('w') as f:
json.dump(expected, f)
f.flush()
model = fake_model(resources={'fm_agg_profile_path': f.name})
profile = om().get_fm_aggregation_profile(oasis_model=model)
self.assertEqual(expected, profile)
self.assertEqual(expected, model.resources['fm_agg_profile'])
def test_model_is_set_with_profile_path_and_profile_path_is_passed_through_kwargs___kwargs_profile_is_used(
self
):
with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file:
json.dump(self.profile, model_file)
model_file.flush()
json.dump(self.profile, kwargs_file)
kwargs_file.flush()
model = fake_model(resources={'fm_agg_profile_path': model_file.name})
profile = om().get_fm_aggregation_profile(oasis_model=model, fm_agg_profile_path=kwargs_file.name)
self.assertEqual(self.profile, profile)
self.assertEqual(self.profile, model.resources['fm_agg_profile'])
@pytest.mark.skipif(True, reason="CSV file transformations to be removed")
class TransformSourceToCanonical(TestCase):
@given(
source_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_to_canonical_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_exposure_validation_file_path=text(alphabet=string.ascii_letters),
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path,
source_exposure_validation_file_path,
canonical_exposure_file_path
):
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_source_to_canonical(
source_exposure_file_path=source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path=source_to_canonical_exposure_transformation_file_path,
canonical_exposure_file_path=canonical_exposure_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposure_file_path),
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(source_to_canonical_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=True,
)
trans_call_mock.assert_called_once_with()
@given(
source_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_to_canonical_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
source_exposure_validation_file_path=text(alphabet=string.ascii_letters),
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_set___parameters_are_taken_from_model(
self,
source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path,
source_exposure_validation_file_path,
canonical_exposure_file_path):
model = fake_model(resources={
'source_exposure_file_path': source_exposure_file_path,
'source_exposure_validation_file_path': source_exposure_validation_file_path,
'source_to_canonical_exposure_transformation_file_path': source_to_canonical_exposure_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposure_path = canonical_exposure_file_path
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
#import ipdb; ipdb.set_trace()
om().transform_source_to_canonical(
source_exposure_file_path=source_exposure_file_path,
source_to_canonical_exposure_transformation_file_path=source_to_canonical_exposure_transformation_file_path,
canonical_exposure_file_path=canonical_exposure_file_path
)
trans_mock.assert_called_once_with(
os.path.abspath(source_exposure_file_path),
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(source_to_canonical_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=True
)
trans_call_mock.assert_called_once_with()
@pytest.mark.skipif(True, reason="CSV file transformations to be removed")
class TransformCanonicalToModel(TestCase):
@given(
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_to_model_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_exposure_validation_file_path=text(alphabet=string.ascii_letters),
model_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_not_set___parameters_are_taken_from_kwargs(
self,
canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path,
canonical_exposure_validation_file_path,
model_exposure_file_path):
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_canonical_to_model(
canonical_exposure_file_path=canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path=canonical_to_model_exposure_transformation_file_path,
model_exposure_file_path=model_exposure_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(model_exposure_file_path),
os.path.abspath(canonical_to_model_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=False
)
trans_call_mock.assert_called_once_with()
@given(
canonical_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_to_model_exposure_transformation_file_path=text(min_size=1, alphabet=string.ascii_letters),
canonical_exposure_validation_file_path=text(alphabet=string.ascii_letters),
model_exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_set___parameters_are_taken_from_model(
self,
canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path,
canonical_exposure_validation_file_path,
model_exposure_file_path):
model = fake_model(resources={
'canonical_exposure_validation_file_path': canonical_exposure_validation_file_path,
'canonical_to_model_exposure_transformation_file_path': canonical_to_model_exposure_transformation_file_path,
})
model.resources['oasis_files_pipeline'].canonical_exposure_path = canonical_exposure_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
trans_call_mock = Mock()
with patch('oasislmf.model_preparation.csv_trans.Translator', Mock(return_value=trans_call_mock)) as trans_mock:
om().transform_canonical_to_model(
canonical_exposure_file_path=canonical_exposure_file_path,
canonical_to_model_exposure_transformation_file_path=canonical_to_model_exposure_transformation_file_path,
model_exposure_file_path=model_exposure_file_path,
)
trans_mock.assert_called_once_with(
os.path.abspath(canonical_exposure_file_path),
os.path.abspath(model_exposure_file_path),
os.path.abspath(canonical_to_model_exposure_transformation_file_path),
xsd_path=None,
append_row_nums=False
)
trans_call_mock.assert_called_once_with()
class GetKeys(TestCase):
def create_model(
self,
lookup='lookup',
keys_file_path='key_file_path',
keys_errors_file_path='keys_errors_file_path',
model_exposure_file_path='model_exposure_file_path'
):
model = fake_model(resources={'lookup': lookup})
model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path
model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path
model.resources['oasis_files_pipeline'].model_exposure_file_path = model_exposure_file_path
return model
@given(
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_file_path=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters),
exposure_file_path=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_is_supplied_kwargs_are_not___lookup_keys_files_and_exposures_file_from_model_are_used(
self,
lookup,
keys_file_path,
keys_errors_file_path,
exposure_file_path
):
model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposure_file_path=exposure_file_path)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(oasis_model=model)
oklf_mock.assert_called_once_with(
lookup,
keys_file_path,
errors_fp=keys_errors_file_path,
model_exposure_fp=exposure_file_path
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path)
self.assertEqual(res_keys_file_path, keys_file_path)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path)
self.assertEqual(res_keys_errors_file_path, keys_errors_file_path)
@given(
model_lookup=text(min_size=1, alphabet=string.ascii_letters),
model_keys_fp=text(min_size=1, alphabet=string.ascii_letters),
model_keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
model_exposure_fp=text(min_size=1, alphabet=string.ascii_letters),
lookup=text(min_size=1, alphabet=string.ascii_letters),
keys_fp=text(min_size=1, alphabet=string.ascii_letters),
keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters),
exposures_fp=text(min_size=1, alphabet=string.ascii_letters)
)
def test_model_and_kwargs_are_supplied___lookup_keys_files_and_exposures_file_from_kwargs_are_used(
self,
model_lookup,
model_keys_fp,
model_keys_errors_fp,
model_exposure_fp,
lookup,
keys_fp,
keys_errors_fp,
exposures_fp
):
model = self.create_model(lookup=model_lookup, keys_file_path=model_keys_fp, keys_errors_file_path=model_keys_errors_fp, model_exposure_file_path=model_exposure_fp)
with patch('oasislmf.model_preparation.lookup.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock:
res_keys_file_path, res_keys_errors_file_path = om().get_keys(
oasis_model=model,
lookup=lookup,
model_exposure_file_path=exposures_fp,
keys_file_path=keys_fp,
keys_errors_file_path=keys_errors_fp
)
oklf_mock.assert_called_once_with(
lookup,
keys_fp,
errors_fp=keys_errors_fp,
model_exposure_fp=exposures_fp
)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp)
self.assertEqual(res_keys_file_path, keys_fp)
self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp)
self.assertEqual(res_keys_errors_file_path, keys_errors_fp)
class GetGulInputItems(TestCase):
def setUp(self):
self.profile = copy.deepcopy(canonical_exposure_profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=0),
keys=keys(size=2)
)
def test_no_fm_terms_in_canonical_profile__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
        _p = copy.deepcopy(profile)
for _k, _v in iteritems(_p):
for __k, __v in iteritems(_v):
if 'FM' in __k:
profile[_k].pop(__k)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=0),
keys=keys(size=2)
)
def test_no_canonical_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(size=0)
)
def test_no_keys_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_items_dont_match_any_keys_items__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
l = len(exposures)
for key in keys:
key['id'] += l
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_profile_doesnt_have_any_tiv_fields__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
tivs = [profile[e]['ProfileElementName'] for e in profile if profile[e].get('FMTermType') and profile[e]['FMTermType'].lower() == 'tiv']
for t in tivs:
profile.pop(t)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=just(0.0),
size=2
),
keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=2)
)
def test_canonical_items_dont_have_any_positive_tivs__oasis_exception_is_raised(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
with self.assertRaises(OasisException):
om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=just(1.0),
size=2
),
keys=keys(
from_coverage_type_ids=just(OASIS_COVERAGE_TYPES['buildings']['id']),
from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
size=2
)
)
def test_only_buildings_coverage_type_in_exposure_and_model_lookup_supporting_single_peril_and_buildings_coverage_type__gul_items_are_generated(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
ufcp = unified_canonical_fm_profile_by_level_and_term_group(profiles=(profile,))
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
matching_canonical_and_keys_item_ids = set(k['id'] for k in keys).intersection([e['row_id'] for e in exposures])
gul_items_df, canexp_df = om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
get_canonical_item = lambda i: (
[e for e in exposures if e['row_id'] == i + 1][0] if len([e for e in exposures if e['row_id'] == i + 1]) == 1
else None
)
get_keys_item = lambda i: (
[k for k in keys if k['id'] == i + 1][0] if len([k for k in keys if k['id'] == i + 1]) == 1
else None
)
tiv_elements = (ufcp[1][1]['tiv'],)
fm_terms = {
1: {
'deductible': 'wscv1ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv1limit',
'share': None
}
}
for i, gul_it in enumerate(gul_items_df.T.to_dict().values()):
can_it = get_canonical_item(int(gul_it['canexp_id']))
self.assertIsNotNone(can_it)
keys_it = get_keys_item(int(gul_it['canexp_id']))
self.assertIsNotNone(keys_it)
positive_tiv_elements = [
t for t in tiv_elements if can_it.get(t['ProfileElementName'].lower()) and can_it[t['ProfileElementName'].lower()] > 0 and t['CoverageTypeID'] == keys_it['coverage_type']
]
for _, t in enumerate(positive_tiv_elements):
tiv_elm = t['ProfileElementName'].lower()
self.assertEqual(tiv_elm, gul_it['tiv_elm'])
tiv_tgid = t['FMTermGroupID']
self.assertEqual(can_it[tiv_elm], gul_it['tiv'])
ded_elm = fm_terms[tiv_tgid].get('deductible')
self.assertEqual(ded_elm, gul_it['ded_elm'])
ded_min_elm = fm_terms[tiv_tgid].get('deductible_min')
self.assertEqual(ded_min_elm, gul_it['ded_min_elm'])
ded_max_elm = fm_terms[tiv_tgid].get('deductible_max')
self.assertEqual(ded_max_elm, gul_it['ded_max_elm'])
lim_elm = fm_terms[tiv_tgid].get('limit')
self.assertEqual(lim_elm, gul_it['lim_elm'])
shr_elm = fm_terms[tiv_tgid].get('share')
self.assertEqual(shr_elm, gul_it['shr_elm'])
self.assertEqual(keys_it['area_peril_id'], gul_it['areaperil_id'])
self.assertEqual(keys_it['vulnerability_id'], gul_it['vulnerability_id'])
self.assertEqual(i + 1, gul_it['item_id'])
self.assertEqual(i + 1, gul_it['coverage_id'])
self.assertEqual(can_it['row_id'], gul_it['group_id'])
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(
from_tivs1=floats(min_value=1.0, allow_infinity=False),
from_tivs2=floats(min_value=2.0, allow_infinity=False),
from_tivs3=floats(min_value=3.0, allow_infinity=False),
from_tivs4=floats(min_value=4.0, allow_infinity=False),
size=2
),
keys=keys(
from_peril_ids=just(OASIS_PERILS['wind']['id']),
from_coverage_type_ids=just(OASIS_COVERAGE_TYPES['buildings']['id']),
from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
size=8
)
)
def test_all_coverage_types_in_exposure_and_model_lookup_supporting_multiple_perils_but_only_buildings_and_other_structures_coverage_types__gul_items_are_generated(
self,
exposures,
keys
):
profile = copy.deepcopy(self.profile)
ufcp = unified_canonical_fm_profile_by_level_and_term_group(profiles=(profile,))
exposures[1]['wscv2val'] = exposures[1]['wscv3val'] = exposures[1]['wscv4val'] = 0.0
keys[1]['id'] = keys[2]['id'] = keys[3]['id'] = 1
keys[2]['peril_id'] = keys[3]['peril_id'] = OASIS_PERILS['quake']['id']
keys[1]['coverage_type'] = keys[3]['coverage_type'] = OASIS_COVERAGE_TYPES['other']['id']
keys[4]['id'] = keys[5]['id'] = keys[6]['id'] = keys[7]['id'] = 2
keys[6]['peril_id'] = keys[7]['peril_id'] = OASIS_PERILS['quake']['id']
keys[5]['coverage_type'] = keys[7]['coverage_type'] = OASIS_COVERAGE_TYPES['other']['id']
with NamedTemporaryFile('w') as exposures_file, NamedTemporaryFile('w') as keys_file:
write_canonical_files(exposures, exposures_file.name)
write_keys_files(keys, keys_file.name)
matching_canonical_and_keys_item_ids = set(k['id'] for k in keys).intersection([e['row_id'] for e in exposures])
gul_items_df, canexp_df = om().get_gul_input_items(profile, exposures_file.name, keys_file.name)
self.assertEqual(len(gul_items_df), 6)
self.assertEqual(len(canexp_df), 2)
tiv_elements = (ufcp[1][1]['tiv'], ufcp[1][2]['tiv'])
fm_terms = {
1: {
'deductible': 'wscv1ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv1limit',
'share': None
},
2: {
'deductible': 'wscv2ded',
'deductible_min': None,
'deductible_max': None,
'limit': 'wscv2limit',
'share': None
}
}
for i, gul_it in enumerate(gul_items_df.T.to_dict().values()):
can_it = canexp_df.iloc[gul_it['canexp_id']].to_dict()
keys_it = [k for k in keys if k['id'] == gul_it['canexp_id'] + 1 and k['peril_id'] == gul_it['peril_id'] and k['coverage_type'] == gul_it['coverage_type_id']][0]
positive_tiv_term = [t for t in tiv_elements if can_it.get(t['ProfileElementName'].lower()) and can_it[t['ProfileElementName'].lower()] > 0 and t['CoverageTypeID'] == keys_it['coverage_type']][0]
tiv_elm = positive_tiv_term['ProfileElementName'].lower()
self.assertEqual(tiv_elm, gul_it['tiv_elm'])
tiv_tgid = positive_tiv_term['FMTermGroupID']
self.assertEqual(can_it[tiv_elm], gul_it['tiv'])
ded_elm = fm_terms[tiv_tgid].get('deductible')
self.assertEqual(ded_elm, gul_it['ded_elm'])
ded_min_elm = fm_terms[tiv_tgid].get('deductible_min')
self.assertEqual(ded_min_elm, gul_it['ded_min_elm'])
ded_max_elm = fm_terms[tiv_tgid].get('deductible_max')
self.assertEqual(ded_max_elm, gul_it['ded_max_elm'])
lim_elm = fm_terms[tiv_tgid].get('limit')
self.assertEqual(lim_elm, gul_it['lim_elm'])
shr_elm = fm_terms[tiv_tgid].get('share')
self.assertEqual(shr_elm, gul_it['shr_elm'])
self.assertEqual(keys_it['area_peril_id'], gul_it['areaperil_id'])
self.assertEqual(keys_it['vulnerability_id'], gul_it['vulnerability_id'])
self.assertEqual(i + 1, gul_it['item_id'])
self.assertEqual(i + 1, gul_it['coverage_id'])
self.assertEqual(can_it['row_id'], gul_it['group_id'])
class GetFmInputItems(TestCase):
def setUp(self):
self.exposures_profile = copy.deepcopy(canonical_exposure_profile)
self.accounts_profile = copy.deepcopy(canonical_accounts_profile)
self.unified_canonical_profile = unified_canonical_fm_profile_by_level_and_term_group(
profiles=[self.exposures_profile, self.accounts_profile]
)
self.fm_agg_profile = copy.deepcopy(oasis_fm_agg_profile)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
accounts=canonical_accounts(size=1),
guls=gul_input_items(size=2)
)
def test_no_fm_terms_in_canonical_profiles__oasis_exception_is_raised(
self,
exposures,
accounts,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
        _cep = copy.deepcopy(cep)
        _cap = copy.deepcopy(cap)
for _k, _v in iteritems(_cep):
for __k, __v in iteritems(_v):
if 'FM' in __k:
cep[_k].pop(__k)
for _k, _v in iteritems(_cap):
for __k, __v in iteritems(_v):
if 'FM' in __k:
cap[_k].pop(__k)
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(accounts, accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
self.fm_agg_profile
)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
guls=gul_input_items(size=2)
)
def test_no_aggregation_profile__oasis_exception_is_raised(
self,
exposures,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
fmap = {}
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(canonical_accounts=[], canonical_accounts_file_path=accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
fmap
)
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
exposures=canonical_exposure(size=2),
guls=gul_input_items(size=2)
)
def test_no_canonical_accounts_items__oasis_exception_is_raised(
self,
exposures,
guls
):
cep = copy.deepcopy(self.exposures_profile)
cap = copy.deepcopy(self.accounts_profile)
fmap = copy.deepcopy(self.fm_agg_profile)
with NamedTemporaryFile('w') as accounts_file:
write_canonical_files(canonical_accounts=[], canonical_accounts_file_path=accounts_file.name)
with self.assertRaises(OasisException):
fm_df, canacc_df = om().get_fm_input_items(
pd.DataFrame(data=exposures),
pd.DataFrame(data=guls),
cep,
cap,
accounts_file.name,
fmap
)
class GulInputFilesGenerationTestCase(TestCase):
def setUp(self):
self.profile = canonical_exposure_profile
self.manager = om()
def check_items_file(self, gul_items_df, items_file_path):
expected = tuple(
{
k:it[k] for k in ('item_id', 'coverage_id', 'areaperil_id', 'vulnerability_id', 'group_id',)
} for _, it in gul_items_df.iterrows()
)
with io.open(items_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_coverages_file(self, gul_items_df, coverages_file_path):
expected = tuple(
{
k:it[k] for k in ('coverage_id', 'tiv',)
} for _, it in gul_items_df.iterrows()
)
with io.open(coverages_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_gulsummaryxref_file(self, gul_items_df, gulsummaryxref_file_path):
expected = tuple(
{
k:it[k] for k in ('coverage_id', 'summary_id', 'summaryset_id',)
} for _, it in gul_items_df.iterrows()
)
with io.open(gulsummaryxref_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
class FmInputFilesGenerationTestCase(TestCase):
def setUp(self):
self.exposures_profile = canonical_exposure_profile
self.accounts_profile = canonical_accounts_profile
self.unified_canonical_profile = unified_canonical_fm_profile_by_level_and_term_group(
profiles=(self.exposures_profile, self.accounts_profile,)
)
self.fm_agg_profile = oasis_fm_agg_profile
self.manager = om()
def check_fm_policytc_file(self, fm_items_df, fm_policytc_file_path):
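        # Rebuild the expected fm_policytc rows independently of the generator: group
        # the FM items on layer/level/agg/policytc plus the term columns, keep the
        # first four key fields per group, and compare against the file on disk.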
fm_policytc_df = pd.DataFrame(
columns=['layer_id', 'level_id', 'agg_id', 'policytc_id'],
data=[key[:4] for key, _ in fm_items_df.groupby(['layer_id', 'level_id', 'agg_id', 'policytc_id', 'limit', 'deductible', 'share'])],
dtype=object
)
expected = tuple(
{
k:it[k] for k in ('layer_id', 'level_id', 'agg_id', 'policytc_id',)
} for _, it in fm_policytc_df.iterrows()
)
with io.open(fm_policytc_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_fm_profile_file(self, fm_items_df, fm_profile_file_path):
cols = ['policytc_id', 'calcrule_id', 'limit', 'deductible', 'deductible_min', 'deductible_max', 'attachment', 'share']
fm_profile_df = fm_items_df[cols]
fm_profile_df = pd.DataFrame(
columns=cols,
data=[key for key, _ in fm_profile_df.groupby(cols)]
)
col_repl = [
{'deductible': 'deductible1'},
{'deductible_min': 'deductible2'},
{'deductible_max': 'deductible3'},
{'attachment': 'attachment1'},
{'limit': 'limit1'},
{'share': 'share1'}
]
for repl in col_repl:
fm_profile_df.rename(columns=repl, inplace=True)
n = len(fm_profile_df)
fm_profile_df['index'] = range(n)
fm_profile_df['share2'] = fm_profile_df['share3'] = [0]*n
expected = tuple(
{
k:it[k] for k in ('policytc_id','calcrule_id','deductible1', 'deductible2', 'deductible3', 'attachment1', 'limit1', 'share1', 'share2', 'share3',)
} for _, it in fm_profile_df.iterrows()
)
with io.open(fm_profile_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_fm_programme_file(self, fm_items_df, fm_programme_file_path):
fm_programme_df = pd.DataFrame(
pd.concat([fm_items_df[fm_items_df['level_id']==OASIS_FM_LEVELS['coverage']['id']], fm_items_df])[['level_id', 'agg_id']],
dtype=int
).reset_index(drop=True)
num_cov_items = len(fm_items_df[fm_items_df['level_id']==OASIS_FM_LEVELS['coverage']['id']])
for i in range(num_cov_items):
fm_programme_df.at[i, 'level_id'] = 0
def from_agg_id_to_agg_id(from_level_id, to_level_id):
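            # Pair rows of two adjacent levels positionally: the n-th row at
            # `from_level_id` maps to the n-th row at `to_level_id`, yielding the
            # (from_agg_id, level_id, to_agg_id) triples expected in the programme file.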
iterator = (
(from_level_it, to_level_it)
for (_,from_level_it), (_, to_level_it) in zip(
fm_programme_df[fm_programme_df['level_id']==from_level_id].iterrows(),
fm_programme_df[fm_programme_df['level_id']==to_level_id].iterrows()
)
)
for from_level_it, to_level_it in iterator:
yield from_level_it['agg_id'], to_level_id, to_level_it['agg_id']
levels = list(set(fm_programme_df['level_id']))
data = [
(from_agg_id, level_id, to_agg_id) for from_level_id, to_level_id in zip(levels, levels[1:]) for from_agg_id, level_id, to_agg_id in from_agg_id_to_agg_id(from_level_id, to_level_id)
]
fm_programme_df = pd.DataFrame(columns=['from_agg_id', 'level_id', 'to_agg_id'], data=data, dtype=int).drop_duplicates()
expected = tuple(
{
k:it[k] for k in ('from_agg_id', 'level_id', 'to_agg_id',)
} for _, it in fm_programme_df.iterrows()
)
with io.open(fm_programme_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_fm_xref_file(self, fm_items_df, fm_xref_file_path):
data = [
(i + 1, agg_id, layer_id) for i, (agg_id, layer_id) in enumerate(itertools.product(set(fm_items_df['agg_id']), set(fm_items_df['layer_id'])))
]
fm_xref_df = pd.DataFrame(columns=['output', 'agg_id', 'layer_id'], data=data, dtype=int)
expected = tuple(
{
k:it[k] for k in ('output', 'agg_id', 'layer_id',)
} for _, it in fm_xref_df.iterrows()
)
with io.open(fm_xref_file_path, 'r', encoding='utf-8') as f:
result = tuple(pd.read_csv(f).T.to_dict().values())
self.assertEqual(expected, result)
def check_fmsummaryxref_file(self, fm_items_df, fmsummaryxref_file_path):
data = [
(i + 1, 1, 1) for i, _ in enumerate(itertools.product(set(fm_items_df['agg_id']), set(fm_items_df['layer_id'])))
]
        fmsummaryxref_df = pd.DataFrame(columns=['output', 'summary_id', 'summaryset_id'], data=data, dtype=int)
        expected = tuple(
            {
                k:it[k] for k in ('output', 'summary_id', 'summaryset_id',)
            } for _, it in fmsummaryxref_df.iterrows()
        )
        with io.open(fmsummaryxref_file_path, 'r', encoding='utf-8') as f:
            result = tuple(pd.read_csv(f).T.to_dict().values())
        self.assertEqual(expected, result)
import pandas
import numpy
import warnings
import itertools
import matplotlib.pyplot as plt
import seaborn
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn import metrics as metrics
warnings.filterwarnings("ignore")
train = pandas.read_csv("train.csv")
test = pandas.read_csv("test.csv")
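# Illustrative sketch only (not the original notebook's code): one common way the
# imported pieces are wired together. The "target" column name and the purely
# numeric features are assumptions.
def quick_baseline(df):
    y = LabelEncoder().fit_transform(df["target"])                  # hypothetical label column
    X = StandardScaler().fit_transform(df.drop(columns=["target"]))
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    knn = KNeighborsClassifier(n_neighbors=5).fit(X_tr, y_tr)
    print("5-fold CV accuracy:", cross_val_score(knn, X, y, cv=5).mean())
    print("holdout accuracy:", metrics.accuracy_score(y_te, knn.predict(X_te)))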
import csv
import numpy
import pandas as pd
import numpy as np
from core.component import Component
from core.power import PowerInterface
class Source(Component):
def __init__(self, location, power):
super().__init__(location)
self.power_in = power
    def get_power_in(self):  # TODO Check that this is correct
        return self.power_in
class HighSpeedDiesel(Source):
engine_row = 2 # This is currently #####
engine_data = None
def __init__(self, location, power):
super().__init__(location, power)
self.power_brake = power
self.percent_load = None
self.SFOC = None
self.NOX_rate = None
self.SOX_rate = None
self.CO2_rate = None
self.power = None
        if self.engine_data is None:
            self.emission_curves()
    def emission_curves(self):
        data = 'Cat_engine_data.csv'
        engine_data = pd.read_csv(data)
        engine_data.set_index('Engine', inplace=True)
        # Cache on the class so the CSV is read only once across instances
        HighSpeedDiesel.engine_data = engine_data
def solve_emissions(self):
self.solve_fuel_consumption()
self.solve_NOX()
self.solve_CO()
self.solve_HC()
self.solve_CO2()
self.solve_PM()
self.solve_CO2_eq()
def solve_fuel_consumption(self):
SFOC_data = pd.DataFrame(data={'engine_load': [1, 0.9, 0.8, 0.75, 0.7, 0.6, 0.5, 0.4, 0.3, 0.25, 0.2, 0.1],
'SFOC': 608.3 * self.engine_data.iloc[self.engine_row][
['100% BSFC', '90% BSFC', '80% BSFC', '75% BSFC', '70% BSFC', '60% BSFC',
'50% BSFC', '40% BSFC', '30% BSFC', '25% BSFC', '20% BSFC', '10% BSFC']]})
SFOC_data_fit = np.polyfit(SFOC_data['engine_load'], SFOC_data['SFOC'], 4)
self.SFOC = numpy.polyval(SFOC_data_fit, self.percent_load) # in g/kWh
self.fuel_consumption = self.SFOC * self.power # in g/hr
def solve_NOX(self):
self.NOX_data = pd.DataFrame(data={'engine_load': [1, 0.75, 0.5, 0.25, 0.1],
'NOX': self.engine_data.iloc[self.engine_row][
['100% NOX', '75% NOX', '50% NOX', '25% NOX', '10% NOX']]
/ np.multiply([1, 0.75, 0.5, 0.25, 0.1], 0.7457*self.engine_data.iloc[self.engine_row]['BHP'])})
# 0.7457 converts BHP to BkW
self.specific_NOX_rate = np.interp(self.percent_load, np.flip(self.NOX_data['engine_load']), np.flip(self.NOX_data['NOX'])) # in g/kWh
self.NOX_rate = self.specific_NOX_rate * self.power # in g/hr
def get_sox(self, power_wanted):
# TODO Fix or remove this
sox_specific_rate = 10 # Using standard SOX generation rate of 10g/kWh from L21/31 Project Guide
self.sox_rate = power_wanted * sox_specific_rate # This gives the sox generation rate in g/hr
return self.sox_rate
def solve_CO(self):
CO_data = pd.DataFrame(data={'engine_load': [1, 0.75, 0.5, 0.25, 0.1],
'CO': self.engine_data.iloc[self.engine_row][['100% CO', '75% CO', '50% CO', '25% CO', '10% CO']]
/ np.multiply([1, 0.75, 0.5, 0.25, 0.1],
0.7457*self.engine_data.iloc[self.engine_row]['BHP'])})
self.CO_specific_rate = np.interp(self.percent_load, np.flip(CO_data['engine_load']), np.flip(CO_data['CO'])) # in g/kWh
        self.CO_rate = self.CO_specific_rate * self.power  # in g/hr
def solve_HC(self):
self.HC_data = pd.DataFrame(data={'engine_load': [1, 0.75, 0.5, 0.25, 0.1],
'HC': self.engine_data.iloc[self.engine_row][['100% HC', '75% HC', '50% HC', '25% HC', '10% HC']]
/ np.multiply([1, 0.75, 0.5, 0.25, 0.1], 0.7457*self.engine_data.iloc[self.engine_row]['BHP'])})
self.HC_specific_rate = np.interp(self.percent_load, np.flip(self.HC_data['engine_load']), np.flip(self.HC_data['HC'])) # in g/kWh
self.HC_rate = self.HC_specific_rate * self.power # in g/hr
def solve_CO2(self):
self.CO2_data = pd.DataFrame(data={'engine_load': [1, 0.75, 0.5, 0.25, 0.1],
'CO2': self.engine_data.iloc[self.engine_row][
['100% CO2', '75% CO2', '50% CO2', '25% CO2', '10% CO2']]
/ np.multiply([1, 0.75, 0.5, 0.25, 0.1], 0.7457*self.engine_data.iloc[self.engine_row]['BHP'])})
CO2_data_fit = np.polyfit(self.CO2_data['engine_load'], self.CO2_data['CO2'], 4)
self.CO2_specific_rate = np.polyval(CO2_data_fit, self.percent_load) # in kg/kWh
self.CO2_rate = self.CO2_specific_rate * self.power # in kg/hr
def solve_PM(self):
PM_data = pd.DataFrame(data={'engine_load': [1, 0.75, 0.5, 0.25, 0.1],
'PM': self.engine_data.iloc[self.engine_row][['100% PM', '75% PM', '50% PM', '25% PM', '10% PM']]
/ np.multiply([1, 0.75, 0.5, 0.25, 0.1], 0.7457*self.engine_data.iloc[self.engine_row]['BHP'])})
self.PM_specific_rate = np.interp(self.percent_load, np.flip(PM_data['engine_load']), np.flip(PM_data['PM'])) # in g/kWh
self.PM_rate = self.PM_specific_rate * self.power # in g/hr
def solve_CO2_eq(self):
GWP_CH4 = 25
GWP_N2O = 298
CO2_eq = self.CO2_data['CO2'].reset_index(drop=True) + GWP_CH4 * self.HC_data['HC'].reset_index(
drop=True) / 1000 + GWP_N2O * self.NOX_data['NOX'].reset_index(drop=True) / 1000
        CO2_eq_data = pd.DataFrame(data={'engine_load': [1, 0.75, 0.5, 0.25, 0.1], 'CO2_eq': CO2_eq})
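# Minimal usage sketch (illustrative, not part of the original module); it assumes
# 'Cat_engine_data.csv' is available and that the caller assigns `power` and
# `percent_load` before solving. The numeric values below are hypothetical.
def _example_usage():
    engine = HighSpeedDiesel(location=None, power=500)
    engine.power = 500          # brake power drawn from the engine, kW
    engine.percent_load = 0.75  # fraction of rated load
    engine.solve_emissions()
    print(engine.SFOC, engine.NOX_rate, engine.CO2_rate, engine.PM_rate)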
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module main.py. Fixtures come from the file conftest.py located in the
same directory as this file.
"""
from __future__ import absolute_import, division, print_function
import os
import mock
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from pandas.core.indexes.range import RangeIndex
from pandas.util.testing import assert_frame_equal
from statsmodels.tsa.statespace.structural import (
UnobservedComponents, UnobservedComponentsResultsWrapper)
from causalimpact import CausalImpact
from causalimpact.misc import standardize
def test_default_causal_cto(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_date(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
assert_frame_equal(ci.data, date_rand_data)
assert ci.pre_period == pre_str_period
assert ci.post_period == post_str_period
pre_data = date_rand_data.loc[pre_str_period[0]: pre_str_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = date_rand_data.loc[post_str_period[0]: post_str_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
date_rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_no_exog(rand_data, pre_int_period, post_int_period):
rand_data = pd.DataFrame(rand_data.iloc[:, 0])
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert ci.model.exog is None
assert ci.model.endog_names == 'y'
assert ci.model.exog_names is None
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_np_array(rand_data, pre_int_period, post_int_period):
data = rand_data.values
ci = CausalImpact(data, pre_int_period, post_int_period)
assert_array_equal(ci.data, data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = pd.DataFrame(data[pre_int_period[0]: pre_int_period[1] + 1, :])
assert_frame_equal(ci.pre_data, pre_data)
post_data = pd.DataFrame(data[post_int_period[0]: post_int_period[1] + 1, :])
post_data.index = RangeIndex(start=len(pre_data), stop=len(rand_data))
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == [1, 2]
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_causal_cto_w_no_standardization(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period, standardize=False)
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert ci.normed_pre_data is None
assert ci.normed_post_data is None
assert ci.mu_sig is None
assert_array_equal(ci.model.endog, pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.p_value > 0 and ci.p_value < 1
def test_causal_cto_w_seasons(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
nseasons=[{'period': 4}, {'period': 3}])
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [2, 1]
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period,
nseasons=[{'period': 4, 'harmonics': 1},
                                {'period': 3, 'harmonics': 1}])
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [1, 1]
def test_causal_cto_w_custom_model_and_seasons(rand_data, pre_int_period,
post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
model = UnobservedComponents(endog=pre_data.iloc[:, 0], level='llevel',
exog=pre_data.iloc[:, 1:],
freq_seasonal=[{'period': 4}, {'period': 3}])
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model)
assert ci.model.freq_seasonal_periods == [4, 3]
assert ci.model.freq_seasonal_harmonics == [2, 1]
def test_causal_cto_w_custom_model(rand_data, pre_int_period, post_int_period):
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
model = UnobservedComponents(endog=pre_data.iloc[:, 0], level='llevel',
exog=pre_data.iloc[:, 1:])
ci = CausalImpact(rand_data, pre_int_period, post_int_period, model=model)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
def test_causal_cto_raises_on_None_input(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(None, pre_int_period, post_int_period)
assert str(excinfo.value) == 'data input cannot be empty'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, None, post_int_period)
assert str(excinfo.value) == 'pre_period input cannot be empty'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, None)
assert str(excinfo.value) == 'post_period input cannot be empty'
def test_invalid_data_input_raises():
with pytest.raises(ValueError) as excinfo:
CausalImpact('test', [0, 5], [5, 10])
assert str(excinfo.value) == 'Could not transform input data to pandas DataFrame.'
data = [1, 2, 3, 4, 5, 6, 2 + 1j]
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 3], [3, 6])
assert str(excinfo.value) == 'Input data must contain only numeric values.'
data = np.random.randn(10, 2)
data[0, 1] = np.nan
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 3], [3, 6])
assert str(excinfo.value) == 'Input data cannot have NAN values.'
def test_invalid_response_raises():
data = np.random.rand(100, 2)
data[:, 0] = np.ones(len(data)) * np.nan
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == 'Input response cannot have just Null values.'
data[0:2, 0] = 1
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == ('Input response must have more than 3 non-null points '
'at least.')
data[0:3, 0] = 1
with pytest.raises(ValueError) as excinfo:
CausalImpact(data, [0, 50], [50, 100])
assert str(excinfo.value) == 'Input response cannot be constant.'
def test_invalid_alpha_raises(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, alpha=1)
assert str(excinfo.value) == 'alpha must be of type float.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, alpha=2.)
assert str(excinfo.value) == (
'alpha must range between 0 (zero) and 1 (one) inclusive.'
)
def test_custom_model_input_validation(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model='test')
assert str(excinfo.value) == 'Input model must be of type UnobservedComponents.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.level = False
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have level attribute set.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.exog = None
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have exog attribute set.'
ucm = UnobservedComponents(rand_data.iloc[:101, 0], level='llevel',
exog=rand_data.iloc[:101, 1:])
ucm.data = None
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period, model=ucm)
assert str(excinfo.value) == 'Model must have data attribute set.'
def test_kwargs_validation(rand_data, pre_int_period, post_int_period):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize='yes')
assert str(excinfo.value) == 'Standardize argument must be of type bool.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[7])
assert str(excinfo.value) == (
'nseasons must be a list of dicts with the required key "period" and the '
'optional key "harmonics".'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[{'test': 8}])
assert str(excinfo.value) == 'nseasons dicts must contain the key "period" defined.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, pre_int_period, post_int_period,
standardize=False, nseasons=[{'period': 4, 'harmonics': 3}])
assert str(excinfo.value) == (
'Total harmonics must be less or equal than periods divided by 2.')
def test_periods_validation(rand_data, date_rand_data):
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 10], [4, 7])
assert str(excinfo.value) == (
'Values in training data cannot be present in the '
'post-intervention data. Please fix your pre_period value to cover at most one '
'point less from when the intervention happened.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180201'],
['20180110', '20180210'])
assert str(excinfo.value) == (
'Values in training data cannot be present in the '
'post-intervention data. Please fix your pre_period value to cover at most one '
'point less from when the intervention happened.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 10], [15, 11])
assert str(excinfo.value) == 'post_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180110'],
['20180115', '20180111'])
assert str(excinfo.value) == 'post_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 2], [15, 11])
assert str(excinfo.value) == 'pre_period must span at least 3 time points.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180102'],
['20180115', '20180111'])
assert str(excinfo.value) == 'pre_period must span at least 3 time points.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [5, 0], [15, 11])
assert str(excinfo.value) == 'pre_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180105', '20180101'],
['20180115', '20180111'])
assert str(excinfo.value) == 'pre_period last number must be bigger than its first.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, 0, [15, 11])
assert str(excinfo.value) == 'Input period must be of type list.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, '20180101', ['20180115', '20180130'])
assert str(excinfo.value) == 'Input period must be of type list.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 10, 30], [15, 11])
assert str(excinfo.value) == (
'Period must have two values regarding the beginning '
'and end of the pre and post intervention data.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, None], [15, 11])
assert str(excinfo.value) == 'Input period cannot have `None` values.'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 5.5], [15, 11])
assert str(excinfo.value) == 'Input must contain either int, str or pandas Timestamp'
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [-2, 10], [11, 20])
assert str(excinfo.value) == (
'-2 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [0, 10], [11, 2000])
assert str(excinfo.value) == (
'2000 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, ['20180101', '20180110'],
['20180111', '20180130'])
assert str(excinfo.value) == (
'20180101 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20180101', '20180110'],
['20180111', '20200130'])
assert str(excinfo.value) == ('20200130 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, ['20170101', '20180110'],
['20180111', '20180120'])
assert str(excinfo.value) == ('20170101 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
CausalImpact(rand_data, [pd.Timestamp('20180101'), pd.Timestamp('20180110')],
[pd.Timestamp('20180111'), pd.Timestamp('20180130')])
assert str(excinfo.value) == (
'20180101 not present in input data index.'
)
with pytest.raises(ValueError) as excinfo:
CausalImpact(date_rand_data, [pd.Timestamp('20180101'),
pd.Timestamp('20180110')], [pd.Timestamp('20180111'),
pd.Timestamp('20200130')])
assert str(excinfo.value) == ('20200130 not present in input data index.')
with pytest.raises(ValueError) as excinfo:
        CausalImpact(date_rand_data, [pd.Timestamp('20170101'),
                     pd.Timestamp('20180110')], [pd.Timestamp('20180111'),
                     pd.Timestamp('20180120')])
    assert str(excinfo.value) == ('20170101 not present in input data index.')
# coding: utf-8
# # Prototype: Merging Newcomer Dataframes with ORES scores for Newcomer Contributions
# June 8, 2018 <NAME>
#
# Using data sources from http://paws-public.wmflabs.org/paws-public/User:Juliakamin/Querying%20new%20editors%20via%20sql.ipynb
import os, time, datetime, csv, glob, math, pprint
from collections import defaultdict, Counter
import pandas as pd
import numpy as np
from dateutil import parser
import click
@click.command()
@click.option('--lang', default='es', help='the wiki language to target')
@click.option('--datadir', default='data', help='where to look for the data')
@click.option('--resultsdir', default='results', help='where to plop results when done')
def main(lang, datadir, resultsdir):
newcomer_file = os.path.join(datadir, lang+"_newcomer_list.csv")
newcomer_revisions_files = glob.glob(
os.path.join(datadir, lang + "_newcomer_revisions*.csv"))
newcomers = {}
counter = 0
with open(newcomer_file, "r") as f:
for newcomer in csv.DictReader(f.readlines()):
            ## REMOVE THE PANDAS SEQUENTIAL INDEX
## IF IT EXISTS
if '' in newcomer.keys():
del newcomer['']
newcomer['wiki'] = lang
newcomer['registration.date'] = datetime.datetime.strptime(
newcomer['registration'].replace("b'","").replace("'",""),
"%Y%m%d%H%M%S")
newcomers[newcomer['user id']] = newcomer
counter += 1
newcomer_revisions = defaultdict(list)
all_ids = set()
for filename in newcomer_revisions_files:
with open(filename, "r") as f:
print(filename)
for revision in csv.DictReader(f.readlines()):
revision['wiki'] = lang
revision_id = revision['revision id']
if('' in revision.keys()):
del revision['']
if(revision_id not in all_ids):
newcomer_revisions[revision['user id']].append(revision)
all_ids.add(revision_id)
for key, revisions in newcomer_revisions.items():
newcomer_revisions[key] = sorted(revisions,
key=lambda x:parser.parse(x['revision time']))
# ### Data Validation
# Here, we confirm that every newcomer has at least one revision
# And that there aren't any revisions that have no newcomer
print("{0} total unique newcomers".format(len(set([x for x in newcomers.keys()]))))
print("{0} total newcomers with edit records in the dataset".format(
len(newcomer_revisions.keys())))
newcomers_in_revision_set = set()
revisions_not_in_newcomer_set = set()
newcomers_not_in_revision_set = set()
for user_id, revisions in newcomer_revisions.items():
if user_id in newcomers.keys():
newcomers_in_revision_set.add(user_id)
else:
revisions_not_in_newcomer_set.add(user_id)
for user_id in newcomers.keys():
if user_id not in newcomer_revisions.keys():
            newcomers_not_in_revision_set.add(user_id)
print("{0} newcomers in revision set".format(len(newcomers_in_revision_set)))
print("{0} revisions not in newcomer set".format(len(revisions_not_in_newcomer_set)))
print("{0} newcomers not in revision set".format(len(newcomers_not_in_revision_set)))
# ### Create Revision Dataframe with Information on users
all_revisions = []
for user_id, revisions in newcomer_revisions.items():
newcomer = newcomers[user_id]
first_revision_time = None
if(len(revisions)>0):
first_revision_time = parser.parse(revisions[0]['revision time'])
treat_time = first_revision_time + datetime.timedelta(hours=48)
for revision in revisions:
revision_time = parser.parse(revision['revision time'])
revision['registration'] = newcomer['registration.date']
revision['days.since.registration'] = (revision_time -
revision['registration']).days
revision['days.since.simulated.treat'] = (revision_time - treat_time).days
revision['edits.6.months'] = newcomer['edit count']
all_revisions.append(revision)
all_revs_file = os.path.join(resultsdir, lang+"_revisions_with_user_11.2017.csv")
pd.DataFrame(all_revisions).to_csv(all_revs_file)
print('saved file: {}'.format(all_revs_file))
# ### Create User Dataframe with Summary Stats on Revisions
# For the power analysis, assume that the participant will be 'treated' within 2 days (48 hrs) of making their first edit. Then count the following information starting 2 days after their first edit:
# * edits in the 7 day period between 3-4 weeks
# * edits in the 4 week period between 1-2 months
    # * edits in the 16 week period between 2-6 months
for user_id, newcomer in newcomers.items():
for key in ['first.n.goodfaith.mean',
'first.n.goodfaith.median',
'first.n.damaging.mean',
'first.n.damaging.median',
'first.goodfaith.score',
'first.damaging.score']:
newcomer[key] = None
newcomer['ORES.rated.revisions'] = 0
## edits counted from registration time
newcomer['edits.6.months'] = 0
newcomer['edits.2.weeks'] = 0
newcomer['edits.4.weeks'] = 0
newcomer['edits.8.weeks'] = 0
newcomer['edits.12.weeks'] = 0
## time interval simulation for power analysis
## following the outcome variables used in this paper:
## https://osf.io/preprints/socarxiv/8qsv6/
newcomer['edits.3.4.weeks'] = 0
newcomer['edits.4.8.weeks'] = 0
newcomer['edits.8.24.weeks'] = 0
## SURVIVAL MEASURE OVER 20 WEEK PERIODS (5 MONTHS)
## BASED ON WHETHER THEY MADE AT LEAST ONE EDIT
## AT ANY TIME AFTER THE OBSERVED PERIOD
## This measure uses days.since.simulated.treat
## for calculation
## We only consider 5 months, since we only have six
## months of information, and since some joined at the end of Nov
survival_weeks = 20
for i in range(1, survival_weeks+1):
newcomer['survival.week.period.' + str(i)] = False
if user_id in newcomer_revisions:
revisions = newcomer_revisions[user_id]
### COUNT REVISIONS OVER A PERIOD OF TIME
newcomer['edits.6.months'] = len(revisions)
newcomer['edits.2.weeks'] = len([x for x in revisions if x['days.since.registration'] <= 7*2])
newcomer['edits.4.weeks'] = len([x for x in revisions if x['days.since.registration'] <= 7*4])
newcomer['edits.8.weeks'] = len([x for x in revisions if x['days.since.registration'] <= 7*8])
newcomer['edits.12.weeks'] = len([x for x in revisions if x['days.since.registration'] <= 7*12])
## SET TIME INTERVAL SIMULATION FOR POWER ANALYSIS
newcomer['edits.3.4.weeks'] = len([x for x in revisions if
x['days.since.simulated.treat'] >= 7*3 and
x['days.since.simulated.treat'] < 7*4 ])
newcomer['edits.4.8.weeks'] = len([x for x in revisions if
x['days.since.simulated.treat'] >= 7*4 and
x['days.since.simulated.treat'] < 7*8 ])
newcomer['edits.8.24.weeks'] = len([x for x in revisions if
                                                x['days.since.simulated.treat'] >= 7*8 and
x['days.since.simulated.treat'] < 7*24 ])
## SET SURVIVAL COLUMNS
## first, find eligible revisions
## Negative day scores are for edits made in the 48 hours from the point of the first edit
## and include the first edit
survival_revision_days = [x['days.since.simulated.treat'] for x in revisions if
math.ceil(x['days.since.simulated.treat'] / 7) <= survival_weeks and
x['days.since.simulated.treat'] > 0]
## If there is at least one eligible revision
## then we should update all appropriate survival periods to True
if(len(survival_revision_days)>0):
final_revision_day = survival_revision_days[-1]
final_revision_period = math.ceil(final_revision_day / 7)
for i in range(1, final_revision_period+1):
newcomer['survival.week.period.' + str(i)] = True
### AGGREGATE ORES SCORES
eligible_revisions = [x for x in revisions if
x['goodfaith']!='na' and x['damaging']!='na' and
x['goodfaith']!='error' and x['damaging']!='error']
newcomer['ORES.rated.revisions'] = len(eligible_revisions)
if(len(eligible_revisions)>0):
newcomer['first.goodfaith.score'] = float(eligible_revisions[0]['goodfaith'])
newcomer['first.damaging.score'] = float(eligible_revisions[0]['damaging'])
newcomer['first.n.goodfaith.mean'] = np.mean([float(x['goodfaith']) for x in eligible_revisions])
newcomer['first.n.goodfaith.median'] = np.median([float(x['goodfaith']) for x in eligible_revisions])
newcomer['first.n.damaging.mean'] = np.mean([float(x['damaging']) for x in eligible_revisions])
newcomer['first.n.damaging.median'] = np.median([float(x['damaging']) for x in eligible_revisions])
ores_file = os.path.join(resultsdir, lang+"_newcomers_with_ores_scores_11.2017.csv")
pd.DataFrame(list(newcomers.values())).to_csv(ores_file)
print('saved file: {}'.format(ores_file))
# ### Make Survival Dataframe
survival_week_records = []
for user_id, newcomer in newcomers.items():
if(int(newcomer['edit count'])>0):
for i in range(1, survival_weeks+1):
survival_week_records.append({"user.id": user_id,
"week":i,
"first.goodfaith.score":newcomer['first.goodfaith.score'],
"first.damaging.score":newcomer['first.damaging.score'],
"survived":int(newcomer["survival.week.period."+str(i)])})
print("Generated {0} survival week periods".format(len(survival_week_records)))
survival_file = os.path.join(resultsdir, lang+"_newcomer_survival_week_periods.2017.csv")
    pd.DataFrame(survival_week_records).to_csv(survival_file)
    print('saved file: {}'.format(survival_file))
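# Assumed entry point: click commands are conventionally invoked this way when the
# script is run directly.
if __name__ == '__main__':
    main()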
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
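# Worked example for the sliding-window expectations below: events arrive every
# 25 minutes carrying the values 0, 1, 2, ... By the fourth event (t = +75 min) the
# 1h window (built from 10-minute buckets) no longer covers the event at t = 0, so
# it holds the values 1, 2, 3 -> sum 6, min 1, max 3, avg 2.0, matching the expected
# row for col1=3.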
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
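# Two aggregators over the same key, but each event carries only one of col1/col2;
# windows that have not yet seen a column report NaN for avg/min/max and 0 for sum.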
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
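# col1 appears only in the very first event, so its aggregates stay frozen at their initial values
# while col2's aggregates keep advancing with every new event.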
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
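# Events alternate between keys '0' and '1' (by parity of i), so each key aggregates
# only the even or odd col1 values independently.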
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
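# aggr_filter admits only events with is_valid == 0; filtered-out events still flow through
# unchanged but do not contribute to the aggregates.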
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
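# max_value=5 caps the count aggregation, so the 24h count stops growing at 5
# even though more events keep arriving.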
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
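# Three aggregators over different columns and window sets run in a single AggregateByKey step;
# every output row carries the combined set of feature columns.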
def test_sliding_window_simple_aggregation_flow_multiple_fields():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
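# FixedWindows reset at window boundaries, so the counts drop back down
# whenever an event falls into a new fixed window.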
def test_fixed_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
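# Fixed windows with non-round sizes (15m/25m/45m/1h), fed from a pandas DataFrame
# via DataframeSource with sample_time as the event time and isotope as the key.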
def test_fixed_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 7.0, 'samples_count_45m': 2.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 8.0, 'samples_count_45m': 3.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 9.0, 'samples_count_45m': 4.0, 'samples_count_1h': 9.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 1.0, 'samples_count_45m': 5.0, 'samples_count_1h': 10.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 6.0, 'samples_count_1h': 11.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918,
'isotope': 'U235'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
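# Same DataFrame-driven flow, but rows are split across two keys (U235/U238),
# so each isotope accumulates its own fixed-window counts.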
def test_fixed_window_aggregation_with_multiple_keys_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U238'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U238'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U238'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U238'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U238'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U238'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U238'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U238'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U238']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['10m', '15m']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235, 'isotope': 'U235'},
{'samples_count_10m': 2.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524, 'isotope': 'U238'},
{'samples_count_10m': 3.0, 'samples_count_15m': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764, 'isotope': 'U235'},
{'samples_count_10m': 3.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394, 'isotope': 'U238'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822, 'isotope': 'U235'},
{'samples_count_10m': 2.0, 'samples_count_15m': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028, 'isotope': 'U235'},
{'samples_count_10m': 1.0, 'samples_count_15m': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646, 'isotope': 'U238'},
{'samples_count_10m': 1.0, 'samples_count_15m': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918, 'isotope': 'U238'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
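# Sliding-window counterpart of the uncommon-window test: counts move with event time
# at 5-minute granularity instead of resetting at fixed window boundaries.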
def test_sliding_window_aggregation_with_uncommon_windows_flow():
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
SlidingWindows(['15m', '25m', '45m', '1h'], '5m'))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 7.0, 'samples_count_45m': 7.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 8.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 8.0, 'samples_count_45m': 9.0, 'samples_count_1h': 9.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 9.0, 'samples_count_45m': 10.0, 'samples_count_1h': 10.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 11.0, 'samples_count_1h': 11.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 7.0, 'samples_count_45m': 12.0, 'samples_count_1h': 12.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 13.0, 'samples_count_1h': 13.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'),
'signal': 656.831, 'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 8.0, 'samples_count_45m': 14.0, 'samples_count_1h': 14.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 9.0, 'samples_count_45m': 15.0, 'samples_count_1h': 15.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 8.0, 'samples_count_45m': 15.0, 'samples_count_1h': 16.0,
'sample_time': pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC'), 'signal': 2585.9646,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 7.0, 'samples_count_45m': 14.0, 'samples_count_1h': 17.0,
'sample_time': pd.Timestamp('2021-05-30 17:30:15.813000+0000', tz='UTC'), 'signal': 358.918,
'isotope': 'U235'}]
assert termination_result == expected, \
f'actual did not match expected. \n actual: {termination_result} \n expected: {expected}'
def test_emit_max_event_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver()), emit_policy=EmitAfterMaxEvent(3)),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(12):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0,
'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 10, 'number_of_stuff_sum_1h': 30, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 30,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0},
{'col1': 11, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36, 'number_of_stuff_sum_24h': 36,
'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 6.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_error_on_bad_emit_policy():
try:
        AggregateByKey([], Table("test", NoopDriver()), emit_policy=EmitEveryEvent)
assert False
except TypeError:
pass
def test_emit_delay_aggregation_flow():
q = queue.Queue(1)
def reduce_fn(acc, x):
if x['col1'] == 2:
q.put(None)
acc.append(x)
return acc
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "count"],
SlidingWindows(['1h'], '10m'))],
Table("test", NoopDriver()), emit_policy=EmitAfterMaxEvent(4, 1)),
Reduce([], reduce_fn),
]).run()
for i in range(11):
if i == 3:
q.get()
data = {'col1': i}
controller.emit(data, 'katya', test_base_time + timedelta(seconds=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_count_1h': 3},
{'col1': 6, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_count_1h': 7},
{'col1': 10, 'number_of_stuff_sum_1h': 55, 'number_of_stuff_count_1h': 11}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregate_dict_simple_aggregation_flow():
aggregations = [{'name': 'number_of_stuff',
'column': 'col1',
'operations': ["sum", "avg", "min", "max"],
'windows': ['1h', '2h', '24h'],
'period': '10m'}]
controller = build_flow([
SyncEmitSource(),
AggregateByKey(aggregations, Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
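# Fixed-window counterpart of the dict test above: the aggregation spec below has no 'period',
# and its expected counts match the bucket-aligned FixedWindows tests further down; they reset
# to 1 whenever an event lands in a new bucket (e.g. the 1h count is back to 1 at col1 == 4,
# and all four windows reset at col1 == 6).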
def test_aggregate_dict_fixed_window():
aggregations = [{'name': 'number_of_stuff',
'column': 'col1',
'operations': ["count"],
'windows': ['1h', '2h', '3h', '24h']}]
controller = build_flow([
SyncEmitSource(),
AggregateByKey(aggregations, Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_old_event():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(3):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col1': 3}, 'tal', test_base_time - timedelta(hours=25))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_old_event():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(3):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col1': 3}, 'tal', test_base_time - timedelta(hours=25))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_out_of_order_event():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(3):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col1': 3}, 'tal', test_base_time + timedelta(minutes=15))
controller.emit({'col1': 4}, 'tal', test_base_time + timedelta(minutes=25 * 3))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2},
{'col1': 3, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2},
{'col1': 4, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_roll_cached_buckets():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_roll_cached_buckets():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_min_1h': 0, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_min_1h': 1, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_min_1h': 2, 'number_of_stuff_min_2h': 0,
'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_min_1h': 3, 'number_of_stuff_min_2h': 1,
'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_min_1h': 4, 'number_of_stuff_min_2h': 2,
'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_min_1h': 5, 'number_of_stuff_min_2h': 3,
'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_min_1h': 6, 'number_of_stuff_min_2h': 4,
'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_min_1h': 7, 'number_of_stuff_min_2h': 5,
'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_aggregation_unique_fields():
try:
build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff", "col1", ["count"],
SlidingWindows(['1h', '2h'], '15m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)), ]).run()
assert False
except TypeError:
pass
def test_fixed_window_aggregation_with_first_and_last_aggregates():
df = pd.DataFrame(
{
"timestamp": [
pd.Timestamp("2021-07-13 06:43:01.084587+0000", tz="UTC"),
pd.Timestamp("2021-07-13 06:46:01.084587+0000", tz="UTC"),
| pd.Timestamp("2021-07-13 06:49:01.084587+0000", tz="UTC") | pandas.Timestamp |
import pickle
import os
from Shared.data import Data
from Shared.data_loader import DataLoader
import numpy as np
import keras
from keras import layers
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import anndata as ad
import pandas as pd
from pathlib import Path
import umap
import tensorflow as tf
import sys
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
class DenoisingAutoEncoder:
data: Data
# The defined encoder
encoder: any
# The defined decoder
decoder: any
    # The autoencoder model
ae: any
history: any
input_dim: int
encoding_dim: int
input_umap: any
latent_umap: any
r2_scores = pd.DataFrame(columns=["Marker", "Score"])
encoded_data = | pd.DataFrame() | pandas.DataFrame |
"""
Test various functions regarding chapter 8: MDI, MDA, SFI importance.
"""
import os
import unittest
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
from mlfinlab.util.volatility import get_daily_vol
from mlfinlab.filters.filters import cusum_filter
from mlfinlab.labeling.labeling import get_events, add_vertical_barrier, get_bins
from mlfinlab.sampling.bootstrapping import get_ind_mat_label_uniqueness, get_ind_matrix
from mlfinlab.ensemble.sb_bagging import SequentiallyBootstrappedBaggingClassifier
from mlfinlab.feature_importance.importance import (feature_importance_mean_decrease_impurity,
feature_importance_mean_decrease_accuracy, feature_importance_sfi,
plot_feature_importance)
from mlfinlab.feature_importance.orthogonal import feature_pca_analysis, get_orthogonal_features
from mlfinlab.cross_validation.cross_validation import PurgedKFold, ml_cross_val_score
# pylint: disable=invalid-name
def _generate_label_with_prob(x, prob, random_state=np.random.RandomState(1)):
"""
Generates true label value with some probability(prob)
"""
choice = random_state.choice([0, 1], p=[1 - prob, prob])
if choice == 1:
return x
return int(not x)
def _get_synthetic_samples(ind_mat, good_samples_thresh, bad_samples_thresh):
"""
Get samples with uniqueness either > good_samples_thresh or uniqueness < bad_samples_thresh
"""
    # Get a mix of samples where some are extremely non-overlapping and the others are highly overlapping
i = 0
unique_samples = []
for label in get_ind_mat_label_uniqueness(ind_mat):
if np.mean(label[label > 0]) > good_samples_thresh or np.mean(label[label > 0]) < bad_samples_thresh:
unique_samples.append(i)
i += 1
return unique_samples
class TestFeatureImportance(unittest.TestCase):
"""
Test Feature importance
"""
def setUp(self):
"""
Set the file path for the sample dollar bars data and get triple barrier events, generate features
"""
project_path = os.path.dirname(__file__)
self.path = project_path + '/test_data/dollar_bar_sample.csv'
self.data = | pd.read_csv(self.path, index_col='date_time') | pandas.read_csv |
from ._rrs import decode_rrs
from ._sondepbl import heffter_pbl, bulk_richardson_pbl, liu_liang_pbl
import pandas as pd
# this makes the imported functions appear in sphinx docs
__all__ = ['decode_rrs', 'estimate_pbl']
# an interface to my sondepbl code
def estimate_pbl(method, height, pressure, temp,
rh=None, ws=None, u=None, v=None,
land=True, critical_threshold=.25):
"""Estimate PBL height using the methods described in :cite:`sivaraman_planetary_2013`.
Args:
method (str): Method to use to estimate PBL. One of 'richardson', 'heffter', or 'liu-liang'.
height (ndarray): Array of heights in km.
pressure (ndarray): Array of pressures in mb.
temp (ndarray): Array of temperatures in °C.
rh (ndarray): Array of relative humidities. Only required when method is 'richardson'.
ws (ndarray): Array of wind speeds in m/s. Only required when method is 'liu-liang'.
u (ndarray): Array of U wind components in m/s. Only required when method is 'richardson'.
v (ndarray): Array of V wind components in m/s. Only required when method is 'richardson'.
land (bool): Whether the sonde is over land, as opposed to ocean or ice. Only used when method is 'liu-liang'.
critical_threshold (float): Bulk Richardson critical threshold. Only used when method is 'richardson'.
Returns:
float: Estimated PBL height in km.
.. bibliography:: ../wxprofilers.bib
"""
# organize a data frame similar to the LISTOS sonde data
df = | pd.DataFrame(data={'P': pressure, 'Height': height, 'Temp': temp}) | pandas.DataFrame |
from database import view
import pandas as pd
# Example data retrieval
def main():
data = view.all()
ours_save_file = "./yu_blender_dialogs.csv"
shirleys_save_file = "./shirley_inspired_dialogs.csv"
raw_blender_save_file = "./raw_blender_dialogs.csv"
zhou_save_file = "./zhou_half_half_dialogs.csv"
user_entity_save_file = "./user_entity_dialogs.csv"
sum_dialog = {"blue": {"bot": [], "human": []},
"cyan": {"bot": [], "human": []},
"yellow": {"bot": [], "human": []},
"green": {"bot": [], "human": []},
"red": {"bot": [], "human": []},}
sum_surveys = { "pre": {"kind_of_movies": [], "favorite_movies":[], "favorite_actors":[], "favorite_directors":[],},
"blue": {"recommend_":[],"preference":[],"consistent":[],"natural":[],"engaging":[],"persuasive":[],"sociable":[],"boring":[],},
"cyan": {"recommend_":[],"preference":[],"consistent":[],"natural":[],"engaging":[],"persuasive":[],"sociable":[],"boring":[],},
"yellow": {"recommend_":[],"preference":[],"consistent":[],"natural":[],"engaging":[],"persuasive":[],"sociable":[],"boring":[],},
"green": {"recommend_":[],"preference":[],"consistent":[],"natural":[],"engaging":[],"persuasive":[],"sociable":[],"boring":[],},
"red": {"recommend_":[],"preference":[],"consistent":[],"natural":[],"engaging":[],"persuasive":[],"sociable":[],"boring":[],},
"survey_4": {"kind_of_movies": [],}}
sum_worker_id = []
sum_blue_length = []
sum_cyan_length = []
sum_yellow_length = []
sum_green_length = []
sum_red_length = []
for hit in data:
if hit.complete:
# print("Info:", type(hit.info))
# print("Dialog:", type(hit.dialog))
# print("Forms:", type(hit.forms))
bot_names = hit.dialog.keys()
for bot_name in bot_names:
sum_dialog[bot_name]["bot"].append(hit.info['worker_id'])
sum_dialog[bot_name]["human"].append("")
for turn in hit.dialog[bot_name]:
sum_dialog[bot_name]["bot"].append(turn['bot'])
sum_dialog[bot_name]["human"].append(turn['human'])
sum_dialog[bot_name]["bot"].append("")
sum_dialog[bot_name]["human"].append("")
df_dialogs_blue = pd.DataFrame(sum_dialog["blue"])
df_dialogs_cyan = pd.DataFrame(sum_dialog["cyan"])
df_dialogs_yellow = pd.DataFrame(sum_dialog["yellow"])
df_dialogs_green = pd.DataFrame(sum_dialog["green"])
df_dialogs_red = | pd.DataFrame(sum_dialog["red"]) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import numpy.random as rd
import torch
class StockTradingEnv:
def __init__(self, cwd='./envs/FinRL', gamma=0.99,
max_stock=1e2, initial_capital=1e6, buy_cost_pct=1e-3, sell_cost_pct=1e-3,
start_date='2008-03-19', end_date='2016-01-01', env_eval_date='2021-01-01',
ticker_list=None, tech_indicator_list=None, initial_stocks=None, if_eval=False):
self.price_ary, self.tech_ary = self.load_data(cwd, if_eval, ticker_list, tech_indicator_list,
start_date, end_date, env_eval_date, )
stock_dim = self.price_ary.shape[1]
self.gamma = gamma
self.max_stock = max_stock
self.buy_cost_pct = buy_cost_pct
self.sell_cost_pct = sell_cost_pct
self.initial_capital = initial_capital
self.initial_stocks = np.zeros(stock_dim, dtype=np.float32) if initial_stocks is None else initial_stocks
# reset()
self.day = None
self.amount = None
self.stocks = None
self.total_asset = None
self.initial_total_asset = None
self.gamma_reward = 0.0
# environment information
self.env_name = 'StockTradingEnv-v1'
self.state_dim = 1 + 2 * stock_dim + self.tech_ary.shape[1]
self.action_dim = stock_dim
self.max_step = len(self.price_ary) - 1
self.if_discrete = False
self.target_return = 3.5
self.episode_return = 0.0
def reset(self):
self.day = 0
price = self.price_ary[self.day]
self.stocks = self.initial_stocks + rd.randint(0, 64, size=self.initial_stocks.shape)
self.amount = self.initial_capital * rd.uniform(0.95, 1.05) - (self.stocks * price).sum()
self.total_asset = self.amount + (self.stocks * price).sum()
self.initial_total_asset = self.total_asset
self.gamma_reward = 0.0
state = np.hstack((self.amount * 2 ** -13,
price,
self.stocks,
self.tech_ary[self.day],)).astype(np.float32) * 2 ** -5
return state
def step(self, actions):
actions = (actions * self.max_stock).astype(int)
self.day += 1
price = self.price_ary[self.day]
for index in np.where(actions < 0)[0]: # sell_index:
if price[index] > 0: # Sell only if current asset is > 0
sell_num_shares = min(self.stocks[index], -actions[index])
self.stocks[index] -= sell_num_shares
self.amount += price[index] * sell_num_shares * (1 - self.sell_cost_pct)
for index in np.where(actions > 0)[0]: # buy_index:
            if price[index] > 0:  # Buy only if the price is > 0 (no missing data on this particular date)
buy_num_shares = min(self.amount // price[index], actions[index])
self.stocks[index] += buy_num_shares
self.amount -= price[index] * buy_num_shares * (1 + self.buy_cost_pct)
state = np.hstack((self.amount * 2 ** -13,
price,
self.stocks,
self.tech_ary[self.day],)).astype(np.float32) * 2 ** -5
total_asset = self.amount + (self.stocks * price).sum()
reward = (total_asset - self.total_asset) * 2 ** -14 # reward scaling
self.total_asset = total_asset
self.gamma_reward = self.gamma_reward * self.gamma + reward
done = self.day == self.max_step
if done:
reward = self.gamma_reward
self.episode_return = total_asset / self.initial_total_asset
return state, reward, done, dict()
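    # Minimal usage sketch (assumes the cached price/tech arrays that load_data() expects
    # under `cwd` are available; actions are scaled by max_stock and rounded to share counts):
    #     env = StockTradingEnv(cwd='./envs/FinRL', if_eval=False)
    #     state = env.reset()
    #     state, reward, done, _ = env.step(np.zeros(env.action_dim, dtype=np.float32))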
def load_data(self, cwd='./envs/FinRL', if_eval=None,
ticker_list=None, tech_indicator_list=None,
start_date='2008-03-19', end_date='2016-01-01', env_eval_date='2021-01-01'):
raw_data_path = f'{cwd}/StockTradingEnv_raw_data.df'
processed_data_path = f'{cwd}/StockTradingEnv_processed_data.df'
data_path_array = f'{cwd}/StockTradingEnv_arrays_float16.npz'
tech_indicator_list = [
'macd', 'boll_ub', 'boll_lb', 'rsi_30', 'cci_30', 'dx_30', 'close_30_sma', 'close_60_sma'
] if tech_indicator_list is None else tech_indicator_list
# ticker_list = [
# 'AAPL', 'MSFT', 'JPM', 'V', 'RTX', 'PG', 'GS', 'NKE', 'DIS', 'AXP', 'HD',
# 'INTC', 'WMT', 'IBM', 'MRK', 'UNH', 'KO', 'CAT', 'TRV', 'JNJ', 'CVX', 'MCD',
# 'VZ', 'CSCO', 'XOM', 'BA', 'MMM', 'PFE', 'WBA', 'DD'
# ] if ticker_list is None else ticker_list # finrl.config.DOW_30_TICKER
ticker_list = [
'AAPL', 'ADBE', 'ADI', 'ADP', 'ADSK', 'ALGN', 'ALXN', 'AMAT', 'AMD', 'AMGN',
'AMZN', 'ASML', 'ATVI', 'BIIB', 'BKNG', 'BMRN', 'CDNS', 'CERN', 'CHKP', 'CMCSA',
'COST', 'CSCO', 'CSX', 'CTAS', 'CTSH', 'CTXS', 'DLTR', 'EA', 'EBAY', 'FAST',
'FISV', 'GILD', 'HAS', 'HSIC', 'IDXX', 'ILMN', 'INCY', 'INTC', 'INTU', 'ISRG',
'JBHT', 'KLAC', 'LRCX', 'MAR', 'MCHP', 'MDLZ', 'MNST', 'MSFT', 'MU', 'MXIM',
'NLOK', 'NTAP', 'NTES', 'NVDA', 'ORLY', 'PAYX', 'PCAR', 'PEP', 'QCOM', 'REGN',
'ROST', 'SBUX', 'SIRI', 'SNPS', 'SWKS', 'TTWO', 'TXN', 'VRSN', 'VRTX', 'WBA',
'WDC', 'WLTW', 'XEL', 'XLNX'
] if ticker_list is None else ticker_list # finrl.config.NAS_74_TICKER
# ticker_list = [
# 'AMGN', 'AAPL', 'AMAT', 'INTC', 'PCAR', 'PAYX', 'MSFT', 'ADBE', 'CSCO', 'XLNX',
# 'QCOM', 'COST', 'SBUX', 'FISV', 'CTXS', 'INTU', 'AMZN', 'EBAY', 'BIIB', 'CHKP',
# 'GILD', 'NLOK', 'CMCSA', 'FAST', 'ADSK', 'CTSH', 'NVDA', 'GOOGL', 'ISRG', 'VRTX',
# 'HSIC', 'BIDU', 'ATVI', 'ADP', 'ROST', 'ORLY', 'CERN', 'BKNG', 'MYL', 'MU',
# 'DLTR', 'ALXN', 'SIRI', 'MNST', 'AVGO', 'TXN', 'MDLZ', 'FB', 'ADI', 'WDC',
# 'REGN', 'LBTYK', 'VRSK', 'NFLX', 'TSLA', 'CHTR', 'MAR', 'ILMN', 'LRCX', 'EA',
# 'AAL', 'WBA', 'KHC', 'BMRN', 'JD', 'SWKS', 'INCY', 'PYPL', 'CDW', 'FOXA', 'MXIM',
# 'TMUS', 'EXPE', 'TCOM', 'ULTA', 'CSX', 'NTES', 'MCHP', 'CTAS', 'KLAC', 'HAS',
# 'JBHT', 'IDXX', 'WYNN', 'MELI', 'ALGN', 'CDNS', 'WDAY', 'SNPS', 'ASML', 'TTWO',
# 'PEP', 'NXPI', 'XEL', 'AMD', 'NTAP', 'VRSN', 'LULU', 'WLTW', 'UAL'
# ] if ticker_list is None else ticker_list # finrl.config.NAS_100_TICKER
# print(raw_df.loc['2000-01-01'])
# j = 40000
# check_ticker_list = set(raw_df.loc.obj.tic[j:j + 200].tolist())
# print(len(check_ticker_list), check_ticker_list)
'''get: train_price_ary, train_tech_ary, eval_price_ary, eval_tech_ary'''
if os.path.exists(data_path_array):
load_dict = np.load(data_path_array)
train_price_ary = load_dict['train_price_ary'].astype(np.float32)
train_tech_ary = load_dict['train_tech_ary'].astype(np.float32)
eval_price_ary = load_dict['eval_price_ary'].astype(np.float32)
eval_tech_ary = load_dict['eval_tech_ary'].astype(np.float32)
else:
processed_df = self.processed_raw_data(raw_data_path, processed_data_path,
ticker_list, tech_indicator_list)
def data_split(df, start, end):
data = df[(df.date >= start) & (df.date < end)]
data = data.sort_values(["date", "tic"], ignore_index=True)
data.index = data.date.factorize()[0]
return data
train_df = data_split(processed_df, start_date, end_date)
eval_df = data_split(processed_df, end_date, env_eval_date)
train_price_ary, train_tech_ary = self.convert_df_to_ary(train_df, tech_indicator_list)
eval_price_ary, eval_tech_ary = self.convert_df_to_ary(eval_df, tech_indicator_list)
np.savez_compressed(data_path_array,
train_price_ary=train_price_ary.astype(np.float16),
train_tech_ary=train_tech_ary.astype(np.float16),
eval_price_ary=eval_price_ary.astype(np.float16),
eval_tech_ary=eval_tech_ary.astype(np.float16), )
if if_eval is None:
price_ary = np.concatenate((train_price_ary, eval_price_ary), axis=0)
tech_ary = np.concatenate((train_tech_ary, eval_tech_ary), axis=0)
elif if_eval:
price_ary = eval_price_ary
tech_ary = eval_tech_ary
else:
price_ary = train_price_ary
tech_ary = train_tech_ary
return price_ary, tech_ary
def processed_raw_data(self, raw_data_path, processed_data_path,
ticker_list, tech_indicator_list):
if os.path.exists(processed_data_path):
processed_df = | pd.read_pickle(processed_data_path) | pandas.read_pickle |
import rebound
import numpy as np
import pandas as pd
import multiprocessing
from collections import OrderedDict
from celmech.poincare import Poincare, PoincareHamiltonian
from celmech import Andoyer, AndoyerHamiltonian
from celmech.resonances import resonant_period_ratios, resonance_intersections_list, resonance_pratio_span
from celmech.transformations import masses_to_jacobi
from celmech.andoyer import get_num_fixed_points
import itertools
def collision(reb_sim, col):
reb_sim.contents._status = 5
return 0
def safe_run_func(runfunc):
def new_run_func(*args, **kwargs):
try:
return runfunc(*args, **kwargs)
except RuntimeError:
return None
return new_run_func
from scipy.optimize import brenth
def F(e,alpha,gamma):
"""Equation 35 of Laskar & Petit (2017)"""
denom = np.sqrt(alpha*(1-e*e)+gamma*gamma*e*e)
return alpha*e -1 + alpha + gamma*e / denom
### start AMD functions
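# The helpers below implement the AMD stability criterion of Laskar & Petit (2017), as
# referenced in the docstrings: critical_relative_AMD() root-finds
# F(e, alpha, gamma) = alpha*e - 1 + alpha + gamma*e/sqrt(alpha*(1 - e^2) + gamma^2*e^2) = 0
# on (0, min(1, 1/alpha - 1)) to get the critical relative AMD of an adjacent planet pair;
# compute_AMD() evaluates AMD = sum(Lambda_k) - sum(G_k*cos(i_k)) with Lambda = m*sqrt(a),
# G = Lambda*sqrt(1 - e^2) and i measured from the total angular-momentum direction;
# AMD_stable_Q() reports the system as not AMD-stable if any pair's C = AMD/Lambda_out exceeds
# its critical value, and AMD_stability_coefficients() returns the C/Ccrit ratios themselves.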
def critical_relative_AMD(alpha,gamma):
"""Equation 29"""
e0 = np.min((1,1/alpha-1))
ec = brenth(F,0,e0,args=(alpha,gamma))
e1c = np.sin(np.arctan(gamma*ec / np.sqrt(alpha*(1-ec*ec))))
curlyC = gamma*np.sqrt(alpha) * (1-np.sqrt(1-ec*ec)) + (1 - np.sqrt(1-e1c*e1c))
return curlyC
@safe_run_func
def compute_AMD(sim):
pstar = sim.particles[0]
Ltot = pstar.m * np.cross(pstar.xyz,pstar.vxyz)
ps = sim.particles[1:]
Lmbda=np.zeros(len(ps))
G = np.zeros(len(ps))
Lhat = np.zeros((len(ps),3))
for k,p in enumerate(sim.particles[1:]):
orb = p.calculate_orbit(primary=pstar)
Lmbda[k] = p.m * np.sqrt(p.a)
G[k] = Lmbda[k] * np.sqrt(1-p.e*p.e)
hvec = np.cross(p.xyz,p.vxyz)
Lhat[k] = hvec / np.linalg.norm(hvec)
Ltot = Ltot + p.m * hvec
cosi = np.array([Lh.dot(Ltot) for Lh in Lhat]) / np.linalg.norm(Ltot)
return np.sum(Lmbda) - np.sum(G * cosi)
@safe_run_func
def AMD_stable_Q(sim):
AMD = compute_AMD(sim)
pstar = sim.particles[0]
ps = sim.particles[1:]
for i in range(len(ps)-1):
pIn = ps[i]
pOut = ps[i+1]
orbIn = pIn.calculate_orbit(pstar)
orbOut = pOut.calculate_orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
LmbdaOut = pOut.m * np.sqrt(orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
if C>Ccrit:
return False
return True
@safe_run_func
def AMD_stability_coefficients(sim):
AMD = compute_AMD(sim)
pstar = sim.particles[0]
ps = sim.particles[1:]
coeffs = np.zeros(len(ps)-1)
for i in range(len(ps)-1):
pIn = ps[i]
pOut = ps[i+1]
orbIn = pIn.calculate_orbit(pstar)
orbOut = pOut.calculate_orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
LmbdaOut = pOut.m * np.sqrt(orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
coeffs[i] = C / Ccrit
return coeffs
def AMD_stability_coefficient(sim, i1, i2):
AMD = compute_AMD(sim)
ps = sim.particles
pstar = ps[0]
pIn = ps[i1]
pOut = ps[i2]
orbIn = pIn.calculate_orbit(pstar)
orbOut = pOut.calculate_orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
LmbdaOut = pOut.m * np.sqrt(orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
return C / Ccrit
### end AMD functions
# write functions to take args and unpack them at top so it's clear what you have to pass in args
@safe_run_func
def orbtseries(sim, args, trio):
Norbits = args[0]
Nout = args[1]
val = np.zeros((Nout, 19))
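    # val layout: column 0 is time in units of the innermost planet's initial period (P0);
    # each planet of `trio` then gets six columns (a/a0, e, inc, Omega, pomega, M),
    # giving 1 + 3*6 = 19 columns in total.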
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
###############################
# Chunk above should be the same in all runfuncs we write in order to match simarchives
# Fill in values below
times = np.linspace(0, Norbits*sim.particles[1].P, Nout) # TTV systems don't have ps[1].P=1, so must multiply!
P0 = sim.particles[1].P
a0 = sim.particles[1].a
for i, time in enumerate(times):
try:
sim.integrate(time, exact_finish_time=0)
except:
break
orbits = sim.calculate_orbits()
skipped = 0
for j, o in enumerate(orbits):
#print(j, trio)
if j+1 not in trio:
skipped += 1
continue
#print(j, 'actually in', trio, skipped)
val[i,0] = sim.t/P0
val[i,6*(j-skipped)+1] = o.a/a0
val[i,6*(j-skipped)+2] = o.e
val[i,6*(j-skipped)+3] = o.inc
val[i,6*(j-skipped)+4] = o.Omega
val[i,6*(j-skipped)+5] = o.pomega
val[i,6*(j-skipped)+6] = o.M
return val
@safe_run_func
def orbsummaryfeaturesxgb(sim, args):
Norbits = args[0]
Nout = args[1]
window = args[2]
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
##############################
times = np.linspace(0, Norbits*sim.particles[1].P, Nout) # TTV systems don't have ps[1].P=1, so must multiply!
ps = sim.particles
P0 = ps[1].P
Nout = len(times)
features = OrderedDict()
AMDcoeffs = AMD_stability_coefficients(sim)
features["C_AMD12"] = AMDcoeffs[0]
features["C_AMD23"] = AMDcoeffs[1]
features["C_AMD_max"] = np.max(AMDcoeffs)
a = np.zeros((sim.N,Nout))
e = np.zeros((sim.N,Nout))
inc = np.zeros((sim.N,Nout))
beta12 = np.zeros(Nout)
beta23 = np.zeros(Nout)
Rhill12 = ps[1].a*((ps[1].m+ps[2].m)/3.)**(1./3.)
Rhill23 = ps[2].a*((ps[2].m+ps[3].m)/3.)**(1./3.)
eHill = [0, Rhill12/ps[1].a, max(Rhill12, Rhill23)/ps[2].a, Rhill23/ps[3].a]
daOvera = [0, (ps[2].a-ps[1].a)/ps[1].a, min(ps[3].a-ps[2].a, ps[2].a-ps[1].a)/ps[2].a, (ps[3].a-ps[2].a)/ps[3].a]
for i, t in enumerate(times):
for j in [1,2,3]:
a[j,i] = ps[j].a
e[j,i] = ps[j].e
inc[j,i] = ps[j].inc
# mutual hill radii since that's what goes into Hill stability
Rhill12 = ps[1].a*((ps[1].m+ps[2].m)/3.)**(1./3.)
Rhill23 = ps[2].a*((ps[2].m+ps[3].m)/3.)**(1./3.)
beta12[i] = (ps[2].a - ps[1].a)/Rhill12
beta23[i] = (ps[3].a - ps[2].a)/Rhill23
try:
sim.integrate(t, exact_finish_time=0)
except:
break
features['t_final_short'] = sim.t/P0
for string, feature in [("beta12", beta12), ("beta23", beta23)]:
mean = feature.mean()
std = feature.std()
features["avg_"+string] = mean
features["std_"+string] = std
features["min_"+string] = min(feature)
features["max_"+string] = max(feature)
for j in [1,2,3]:
for string, feature in [('a', a), ('e', e), ('inc', inc)]:
mean = feature[j].mean()
std = feature[j].std()
features['avg_'+string+str(j)] = mean
features['std_'+string+str(j)] = std
features['max_'+string+str(j)] = feature[j].max()
features['min_'+string+str(j)] = feature[j].min()
features['norm_std_'+string+str(j)] = std/mean
features['norm_max_'+string+str(j)] = np.abs(feature[j] - mean).max()/mean
sample = feature[j][:window]
samplemean = sample.mean()
features['norm_std_window'+str(window)+'_'+string+str(j)] = sample.std()/samplemean
features['norm_max_window'+str(window)+'_'+string+str(j)] = np.abs(sample - samplemean).max()/samplemean
for string, feature in [('eH', e), ('iH', inc)]:
mean = feature[j].mean()
std = feature[j].std()
features['avg_'+string+str(j)] = mean/eHill[j]
features['std_'+string+str(j)] = std/eHill[j]
features['max_'+string+str(j)] = feature[j].max()/eHill[j]
features['min_'+string+str(j)] = feature[j].min()/eHill[j]
string, feature = ('ecross', e)
features['avg_'+string+str(j)] = mean/daOvera[j]
features['std_'+string+str(j)] = std/daOvera[j]
features['max_'+string+str(j)] = feature[j].max()/daOvera[j]
features['min_'+string+str(j)] = feature[j].min()/daOvera[j]
xx = range(a[j].shape[0])
yy = a[j]/a[j].mean()/features["t_final_short"]
par = np.polyfit(xx, yy, 1, full=True)
features['norm_a'+str(j)+'_slope'] = par[0][0]
return pd.Series(features, index=list(features.keys()))
def findres(sim, i1, i2):
delta = 0.03
maxorder = 2
ps = Poincare.from_Simulation(sim=sim).particles # get averaged mean motions
n1 = ps[i1].n
n2 = ps[i2].n
m1 = ps[i1].m
m2 = ps[i2].m
Pratio = n2/n1
if np.isnan(Pratio): # probably due to close encounter where averaging step doesn't converge
return np.nan, np.nan, np.nan
res = resonant_period_ratios(Pratio-delta,Pratio+delta, order=maxorder)
Z = np.sqrt((ps[i1].e*np.cos(ps[i1].pomega) - ps[i2].e*np.cos(ps[i2].pomega))**2 + (ps[i1].e*np.sin(ps[i1].pomega) - ps[i2].e*np.sin(ps[i2].pomega))**2)
maxstrength = 0
j, k, i1, i2, strength = -1, -1, -1, -1, -1
for a, b in res:
s = np.abs(np.sqrt(m1+m2)*Z**((b-a)/2.)/(b*n2 - a*n1))
#print('{0}:{1}'.format(b, a), (b*n2 - a*n1), s)
if s > maxstrength:
j = b
k = b-a
i1 = 1
i2 = 2
strength=s
maxstrength = s
return j, k, strength
def findres2(sim, i1, i2):
maxorder = 2
ps = Poincare.from_Simulation(sim=sim).particles # get averaged mean motions
n1 = ps[i1].n
n2 = ps[i2].n
m1 = ps[i1].m/ps[i1].M
m2 = ps[i2].m/ps[i2].M
Pratio = n2/n1
if np.isnan(Pratio): # probably due to close encounter where averaging step doesn't converge
return np.nan, np.nan, np.nan
delta = 0.03
minperiodratio = max(Pratio-delta, 0.)
maxperiodratio = min(Pratio+delta, 0.999) # too many resonances close to 1
res = resonant_period_ratios(minperiodratio,maxperiodratio, order=2)
Z = np.sqrt((ps[i1].e*np.cos(ps[i1].pomega) - ps[i2].e*np.cos(ps[i2].pomega))**2 + (ps[i1].e*np.sin(ps[i1].pomega) - ps[i2].e*np.sin(ps[i2].pomega))**2)
Zcross = (ps[i2].a-ps[i1].a)/ps[i1].a
j, k, i1, i2, maxstrength = -1, -1, -1, -1, -1
for a, b in res:
s = np.abs(np.sqrt(m1+m2)*(Z/Zcross)**((b-a)/2.)/((b*n2 - a*n1)/n1))
#print('{0}:{1}'.format(b, a), (b*n2 - a*n1), s)
if s > maxstrength:
j = b
k = b-a
maxstrength = s
if maxstrength > -1:
return j, k, maxstrength
else:
return np.nan, np.nan, np.nan
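# findresv3() below is the variant actually used by ressummaryfeaturesxgb(): unlike findres2()
# it builds the Poincare variables with average=False (and guards the construction with
# try/except). All three findres* helpers return (j, k, strength) for the strongest nearby
# mean-motion resonance, where j:(j - k) is the resonance and k <= 2 is its order; findres2 and
# findresv3 weight strength as sqrt(m1 + m2)*(Z/Zcross)**(k/2) divided by the normalized
# distance |(j*n2 - (j - k)*n1)/n1| from exact resonance.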
def findresv3(sim, i1, i2):
maxorder = 2
try:
ps = Poincare.from_Simulation(sim=sim, average=False).particles # get averaged mean motions
except:
return np.nan, np.nan, np.nan
n1 = ps[i1].n
n2 = ps[i2].n
m1 = ps[i1].m/ps[i1].M
m2 = ps[i2].m/ps[i2].M
Pratio = n2/n1
if np.isnan(Pratio): # probably due to close encounter where averaging step doesn't converge
return np.nan, np.nan, np.nan
delta = 0.03
minperiodratio = max(Pratio-delta, 0.)
maxperiodratio = min(Pratio+delta, 0.999) # too many resonances close to 1
res = resonant_period_ratios(minperiodratio,maxperiodratio, order=2)
Z = np.sqrt((ps[i1].e*np.cos(ps[i1].pomega) - ps[i2].e*np.cos(ps[i2].pomega))**2 + (ps[i1].e*np.sin(ps[i1].pomega) - ps[i2].e*np.sin(ps[i2].pomega))**2)
Zcross = (ps[i2].a-ps[i1].a)/ps[i1].a
j, k, i1, i2, maxstrength = -1, -1, -1, -1, -1
for a, b in res:
s = np.abs(np.sqrt(m1+m2)*(Z/Zcross)**((b-a)/2.)/((b*n2 - a*n1)/n1))
#print('{0}:{1}'.format(b, a), (b*n2 - a*n1), s)
if s > maxstrength:
j = b
k = b-a
maxstrength = s
return j, k, maxstrength
@safe_run_func
def normressummaryfeaturesxgb(sim, args):
ps = sim.particles
Mstar = ps[0].m
P1 = ps[1].P
sim2 = rebound.Simulation()
sim2.G = 4*np.pi**2
sim2.add(m=1.)
for p in ps[1:]:
sim2.add(m=p.m/Mstar, P=p.P/P1, e=p.e, inc=p.inc, pomega=p.pomega, Omega=p.Omega, theta=p.theta)
sim2.move_to_com()
sim2.integrator="whfast"
sim2.dt=sim2.particles[1].P*2.*np.sqrt(3)/100.
return ressummaryfeaturesxgb(sim2, args)
@safe_run_func
def ressummaryfeaturesxgb(sim, args):
Norbits = args[0]
Nout = args[1]
###############################
sim.collision_resolve = collision
sim.ri_whfast.keep_unsynchronized = 1
sim.ri_whfast.safe_mode = 0
##############################
features = OrderedDict()
try:
AMDcoeffs = AMD_stability_coefficients(sim)
features["C_AMD12"] = AMDcoeffs[0]
features["C_AMD23"] = AMDcoeffs[1]
features["C_AMD_max"] = np.max(AMDcoeffs)
except:
features["C_AMD12"] = np.nan
features["C_AMD23"] = np.nan
features["C_AMD_max"] = np.nan
ps = sim.particles
sim.init_megno(seed=0)
N = sim.N - sim.N_var
a0 = [0] + [sim.particles[i].a for i in range(1, N)]
Npairs = int((N-1)*(N-2)/2)
js, ks, strengths = np.zeros(Npairs), np.zeros(Npairs), np.zeros(Npairs)
maxj, maxk, maxi1, maxi2, maxpairindex, maxstrength = -1, -1, -1, -1, -1, -1
Zcross = np.zeros(Npairs)
#print('pairindex, i1, i2, j, k, strength')
for i, [i1, i2] in enumerate(itertools.combinations(np.arange(1, N), 2)):
js[i], ks[i], strengths[i] = findresv3(sim, i1, i2)
Zcross[i] = (ps[int(i2)].a-ps[int(i1)].a)/ps[int(i1)].a
#print(i, i1, i2, js[i], ks[i], strengths[i])
if strengths[i] > maxstrength:
maxj, maxk, maxi1, maxi2, maxpairindex, maxstrength = js[i], ks[i], i1, i2, i, strengths[i]
features['Zcross12'] = Zcross[0]
features['Zcross13'] = Zcross[1]
features['Zcross23'] = Zcross[2]
features['maxj'] = maxj
features['maxk'] = maxk
features['maxi1'] = maxi1
features['maxi2'] = maxi2
features['maxstrength'] = maxstrength
sortedstrengths = strengths.copy()
sortedstrengths.sort() # ascending
    if sortedstrengths[-1] > 0 and sortedstrengths[-2] > 0:  # if the two strongest resonances are nonzero
features['secondres'] = sortedstrengths[-2]/sortedstrengths[-1] # ratio of strengths
else:
features['secondres'] = -1
#print('max', maxi1, maxi2, maxj, maxk, maxpairindex, maxstrength)
#print('df (j, k, pairindex):', features['j'], features['k'], features['pairindex'])
times = np.linspace(0, Norbits*sim.particles[1].P, Nout)
eminus = np.zeros((Npairs, Nout))
rebound_Z, rebound_phi = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
rebound_Zcom, rebound_phiZcom = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
rebound_Zstar, rebound_dKprime = np.zeros((Npairs,Nout)), np.zeros((Npairs,Nout))
celmech_Z, celmech_phi = np.zeros(Nout), np.zeros(Nout)
celmech_Zcom, celmech_phiZcom = np.zeros(Nout), np.zeros(Nout)
celmech_Zstar, celmech_dKprime = np.zeros(Nout), np.zeros(Nout)
for i,t in enumerate(times):
for j, [i1, i2] in enumerate(itertools.combinations(np.arange(1, N), 2)):
i1, i2 = int(i1), int(i2)
eminus[j, i] = np.sqrt((ps[i2].e*np.cos(ps[i2].pomega)-ps[i1].e*np.cos(ps[i1].pomega))**2 + (ps[i2].e*np.sin(ps[i2].pomega)-ps[i1].e*np.sin(ps[i1].pomega))**2)
if js[j] != -1:
pvars = Poincare.from_Simulation(sim, average=False)
avars = Andoyer.from_Poincare(pvars, j=int(js[j]), k=int(ks[j]), a10=a0[i1], i1=i1, i2=i2)
rebound_Z[j, i] = avars.Z
rebound_phi[j, i] = avars.phi
rebound_Zcom[j, i] = avars.Zcom
rebound_phiZcom[j, i] = avars.phiZcom
rebound_Zstar[j, i] = avars.Zstar
rebound_dKprime[j, i] = avars.dKprime
try:
sim.integrate(t, exact_finish_time=0)
except:
break
mask = eminus[0] > 0 # where there are data points in case sim ends early
times = times[mask]
eminus = eminus[:, mask]
rebound_Z, rebound_phi = rebound_Z[:, mask], rebound_phi[:, mask]
rebound_Zcom, rebound_phiZcom = rebound_Zcom[:, mask], rebound_phiZcom[:, mask]
rebound_Zstar, rebound_dKprime = rebound_Zstar[:, mask], rebound_dKprime[:, mask]
celmech_Z, celmech_phi, celmech_Zcom, celmech_phiZcom = celmech_Z[mask], celmech_phi[mask], celmech_Zcom[mask], celmech_phiZcom[mask]
celmech_Zstar, celmech_dKprime = celmech_Zstar[mask], celmech_dKprime[mask]
for i, s in zip([0,2], ['12', '23']): # take adjacent ones
EM = eminus[i]
Zc = Zcross[i]
features['EMmed'+s] = np.median(EM)/Zc
features['EMmax'+s] = EM.max()/Zc
try:
p = np.poly1d(np.polyfit(times, EM, 3))
m = p(times)
EMdrift = np.abs((m[-1]-m[0])/m[0])
features['EMdrift'+s] = EMdrift
except:
features['EMdrift'+s] = np.nan
maxindex = (m == m.max()).nonzero()[0][0] # index where cubic polynomial fit to EM reaches max to track long wavelength variations (secular?)
if EMdrift > 0.1 and (maxindex < 0.01*Nout or maxindex > 0.99*Nout): # don't flag as not capturing secular if Z isn't varying significantly in first place
features['capseculartscale'+s] = 0
else:
features['capseculartscale'+s] = 1
features['EMdetrendedstd'+s] = pd.Series(EM-m).std()/EM[0]
rollstd = pd.Series(EM).rolling(window=100).std()
features['EMrollingstd'+s] = rollstd[100:].median()/EM[0]
var = [EM[:j].var() for j in range(len(EM))]
try:
p = np.poly1d(np.polyfit(times[len(var)//2:], var[len(var)//2:], 1)) # fit only second half to get rid of transient
features['DiffcoeffEM'+s] = p[1]/Zc**2
except:
features['DiffcoeffEM'+s] = np.nan
features['medvarEM'+s] = np.median(var[len(var)//2:])/Zc**2
if strengths[i] != -1:
Z = rebound_Z[i]
features['Zmed'+s] = np.median(Z)/Zc
features['Zmax'+s] = rebound_Z[i].max()/Zc
try:
p = np.poly1d(np.polyfit(times, Z, 3))
m = p(times)
features['Zdetrendedstd'+s] = | pd.Series(Z-m) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parsing of a csv game tracking sheet of type 'X', saving data in consice and relevant manner."""
# Here come your imports
import sys
import logging as log
import pandas as pd
# Here come your (few) global variables
# Here come your class definitions
# Here come your function definitions
def parse_pre_shot_situation(data, out):
"""parse the situation leading to the shot"""
# (cycle / free-hit / develop / counter / turnover / rebound / penalty / others)
situation_labels = \
['Festsetzen', 'Freischlag', 'Auslösung', 'Konter', \
'Ballgewinn', 'Abpraller', 'Penalty', 'Sonstige']
situation_categories = \
['CYC', 'FHT', 'DVL', 'CNT', 'TNV', 'RBD', 'PNT', 'OTH']
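    # The German sheet labels map positionally onto the category codes above:
    # Festsetzen=cycle, Freischlag=free-hit, Auslösung=develop, Konter=counter,
    # Ballgewinn=turnover, Abpraller=rebound, Penalty=penalty, Sonstige=others.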
shot_situations = data[situation_labels]
shot_situations.columns = situation_categories
situation_count = shot_situations.notna().sum(axis=1)
if (situation_count != 1).any():
log.warning('no pre shot situation:\n%s', shot_situations[situation_count < 1])
log.warning('multiple pre shot situations:\n%s', shot_situations[situation_count > 1])
situation = pd.Categorical([''] * len(shot_situations.index), categories=situation_categories)
for label, content in shot_situations.items():
situation[content.notna()] = label
log.debug(pd.Series(situation))
log.debug(pd.Series(situation).value_counts())
out['sh_situ'] = pd.Series(situation)
def parse_shot_type(data, out):
"""parse the type of the shot"""
# (wrist / chip / slap / backhand / one-timer / volley / tip / in-tight)
type_labels = \
['Gezogen', 'Chip', 'Slapshot', 'Backhand', 'Direkt', 'Volley', 'Ablenker', 'InTight']
type_categories = \
['WRS', 'CHP', 'SLP', 'BKH', 'ONT', 'VOL', 'TIP', 'INT']
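    # Positional mapping as in the comment above: Gezogen=wrist, Chip=chip, Slapshot=slap,
    # Backhand=backhand, Direkt=one-timer, Volley=volley, Ablenker=tip, InTight=in-tight.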
shot_types = data[type_labels]
shot_types.columns = type_categories
type_count = shot_types.notna().sum(axis=1)
if (type_count != 1).any():
log.warning('no shot type:\n%s', shot_types[type_count < 1])
log.warning('multiple shot types:\n%s', shot_types[type_count > 1])
shot_type = pd.Categorical([''] * len(shot_types.index), categories=type_categories)
for label, content in shot_types.items():
shot_type[content.notna()] = label
log.debug(pd.Series(shot_type))
log.debug(pd.Series(shot_type).value_counts())
out['sh_type'] = pd.Series(shot_type)
def parse_shot_result(data, out):
"""parse the result (blocked / missed / on-goal / goal) of the event / shot"""
result_categories = ['BL', 'MI', 'SOG', 'G']
shot_results = data[result_categories]
log.debug(shot_results.info())
result_count = shot_results.notna().sum(axis=1)
if (result_count < 1).any():
log.warning('no shot result:\n%s', shot_results[result_count < 1])
if (result_count > 1).any():
log.debug('multiple shot results:\n%s', shot_results[result_count > 1])
result = pd.Categorical([''] * len(shot_results.index), categories=result_categories)
for label, content in shot_results.items():
result[content.notna()] = label
log.debug(pd.Series(result))
log.debug(pd.Series(result).value_counts())
out['sh_outc'] = pd.Series(result)
def parse_involved_players_for(data, out):
"""parse the involved (on-field) players for"""
prefix = 'hm_'
players_goalies = data.filter(regex=("^g?[0-9]+$"))
numbers = pd.Series(list(players_goalies))
col = [prefix + str(i) for i in range(1, 7)]
players = pd.DataFrame('', index=players_goalies.index, columns=col)
for index, event in players_goalies.iterrows():
players_on = numbers[event.notna().values]
player_count = len(players_on)
if len(col) >= player_count:
players.iloc[index, 0:player_count] = players_on.values
else:
log.warning('too many players, index : %d', index)
log.debug(players_on)
log.debug(players)
for label, content in players.items():
out[label] = content
def parse_involved_players_against(data, out):
"""parse the involved (on-field) players against"""
prefix = 'aw_'
suffix = '_against'
players_goalies = data[['players' + suffix, 'goalie' + suffix]]
default_number = '?'
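    # Only headcounts are recorded for the opposing team, so placeholder jersey numbers
    # ('?' for skaters, 'g?' for the goalie) stand in for the unknown shirt numbers.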
col = [prefix + str(i) for i in range(1, 7)]
players = pd.DataFrame('', index=players_goalies.index, columns=col)
for index, event in players_goalies.iterrows():
players_on = \
([default_number] * event.loc['players' + suffix]) + \
(['g' + default_number] * event.loc['goalie' + suffix])
player_count = len(players_on)
if len(col) >= player_count:
players.iloc[index, 0:player_count] = players_on
else:
log.warning('too many players, index : %d', index)
log.debug(players_on)
log.debug(players)
for label, content in players.items():
out[label] = content
def parse_acting_players(data, out):
"""parse the acting players (shot, assist, block) from the columns with player numbers"""
players_goalies = data.filter(regex=("^g?[0-9]+$"))
actions = | pd.DataFrame('', index=players_goalies.index, columns=['shot', 'assist', 'block']) | pandas.DataFrame |
# https://www.kaggle.com/tocha4/lanl-master-s-approach
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy as sc
import matplotlib.pyplot as plt
import seaborn as sns
import gc
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter(action='ignore', category=FutureWarning)
from tqdm import tqdm_notebook
import datetime
import time
import random
from joblib import Parallel, delayed
import lightgbm as lgb
from tensorflow import keras
from gplearn.genetic import SymbolicRegressor
#from catboost import Pool, CatBoostRegressor
from litemort import *
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error,mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, RandomizedSearchCV
from sklearn.feature_selection import RFECV, SelectFromModel
import os
import sys
import pickle
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import NuSVR, SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
today = datetime.date.today().strftime('%m%d')
isMORT = len(sys.argv)>1 and sys.argv[1] == "mort"
#isMORT = True
#some_rows=3000
some_rows=None
model_type='mort' if isMORT else 'lgb'
nVerbose = 500
pkl_path = 'G:/kaggle/Earthquake/data/anton_2_{}.pickle'.format(some_rows)
pkl_path = 'G:/kaggle/Earthquake/data/anton_cys0_{}.pickle'.format(some_rows)
eval_metric='l1'
min_error = mean_squared_error if eval_metric=='l1' else mean_absolute_error
params = {
    'n_estimators': 50000,  # reducing n_estimators does not control overfitting
'early_stopping_rounds': 200,
'num_leaves': 256, #128
#'max_bin': 64,
'min_data_in_leaf': 32, #79
'objective': 'tweedie', #'regression',
'max_depth': -1,
'learning_rate': 0.01,
#"boosting": "gbdt",
"bagging_freq": 5,
"bagging_fraction": 1,#0.8126672064208567, #0.8126672064208567,
"bagging_seed": 11,
"metric": 'mae',
"verbosity": nVerbose,
#'reg_alpha': 0.1302650970728192,
#'reg_lambda': 0.3603427518866501,
'colsample_bytree': 0.05
}
print("params=\n{}\n".format(params))
submission = pd.read_csv('G:/kaggle/Earthquake/input/sample_submission.csv')
def Load_MoreDatas(paths):
train_s=[]
y_s=[]
for path,nFile in paths:
for i in range(nFile):
path_X,path_y="{}/train_X_features_{}.csv".format(path,i+1),"{}/train_y_{}.csv".format(path,i+1)
X_ = pd.read_csv(path_X)
y_ = pd.read_csv(path_y, index_col=False, header=None)
train_s.append(X_)
y_s.append(y_)
print("X_[{}]@{}\ny_[{}]@{}".format(X_.shape,path_X,y_.shape,path_y))
if len(train_s)>0:
train_X = pd.concat(train_s, axis=0)
y = pd.concat(y_s, axis=0)
train_X = train_X.reset_index(drop=True)
y = y.reset_index(drop=True)
print("Load_MoreDatas X_[{}] y_[{}]".format(train_X.shape, y.shape))
return train_X,y
if os.path.isfile(pkl_path):
print("\n======load pickle file from {} ...".format(pkl_path))
with open(pkl_path, "rb") as fp: # Pickling
[train_X, test_X, train_y] = pickle.load(fp)
if some_rows is not None:
train_X = train_X[:some_rows]
test_X = test_X[:some_rows]
train_y = train_y[:some_rows]
print("\n======train_X={} test_X={} train_y={} \n".format(train_X.shape, test_X.shape, train_y.shape))
else:
#train_X_2,y_2 = Load_MoreDatas([('G:/kaggle/Earthquake/data/cys/15000', 14),
# ('G:/kaggle/Earthquake/data/cys/17000', 15)])
train_X_0 = pd.read_csv("G:/kaggle/Earthquake/data/train_X_features_865_0.csv")
train_X_1 = pd.read_csv("G:/kaggle/Earthquake/data/train_X_features_865_1.csv")
y_0 = | pd.read_csv("G:/kaggle/Earthquake/data/train_y_0.csv", index_col=False, header=None) | pandas.read_csv |
# coding: utf-8
# # Structural durability analyses for carbon/epoxy laminates
#
# ## §3: Experimental
# In[39]:
#Preamble to hide inputs so that massive code scripts are not cluttering the data visualization output
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# # DLN Contents
#
# 0. [Materials Characterization Laboratory DLN | A Showcase for Convergent Manufacturing Group Ltd](DLN_0_About_Me.ipynb) - A 'Welcome' message to the Convergent Manufacturing - Materials Characterization Group, explaining the concept of these DLN entries and why I made them out of interest for the team's *Characterization Lab Technician/Scientist* opening, and presenting a brief 'About Me' StoryMap
# <br>
#
# 1. [§1: Structural durability analyses of carbon fibre & epoxy-based composites - Introduction](DLN_1_Introduction.ipynb) - An introduction to the quasi-fatigue experiments performed on carbon fibre/epoxy composite specimens.
# <br>
#
# 2. [§2: Structural durability analyses of carbon fibre & epoxy-based composites - Laminate mechanics theory](DLN_2_Theory.ipynb) - A discussion of composite laminate theory, as a basis for performing stress-strain-deformation calculations to characterize the structural durability of composite laminate layups.
# <br>
#
# 3. [§3: Structural durability analyses of carbon fibre & epoxy-based composites - Experimental results](DLN_3_Experimental.ipynb) - Using Python scientific programming libraries to explore and visualize quasi-fatigue tensile & compressive loading experiments on carbon fibre/epoxy composite test coupons.
# <br>
#
# 4. [§4: Structural durability analyses of carbon fibre & epoxy-based composites - Matrix calculations](DLN_4_Calculations.ipynb) - Using MATLAB to perform structural durability matrix calculations from carbon fibre/epoxy composite test coupon experimental data.
# ## I. Experiment log
# * **Date of experiment**: 10.14.2017
# * **Principle investigator**: <NAME>, EIT, BASc
# * **Test operators**: <NAME>, <NAME>, <NAME>
# * **Lead investigator**: Prof. <NAME>
# * **Course**: LVA Nr. 378.029 - 480ADPTPPBV17 - Polymer Product Design and Engineering III - Graduate Seminar
# * **Location**: Institute of Polymer Materials and Testing (IPMT), JKU Linz - Linz, Austria
# * Compounding and specimen preparation Lab: *Composite coupon specimen preparation*
# * Mechanical Lab: *Tensile & compression testing*
#
# ### i. Experiment test (lab) environment conditions
# *Measurement taken: 10-14-2017 08:45:07*
# <b>
#
# $T_{test} (°C) = 21.23$ (*within* $ 23 ± 3 °C$ *standard laboratory atmosphere range as per ASTM D5229*)
# <br>
# $RH (\%) = 55.7$ (*within* $ 50 ± 10 \%$ *standard laboratory atmosphere range as per ASTM D5229*)
# ## 3.1 Composite specimens to be tested for structural durability analyses
# ### 3.1.1 Properties applicable to all test coupons
# * **Composite type**: CFRP - carbon/epoxy laminates
# * Carbon fibre ply:
# * Unidirectional 0°:
# * Unidirectional 90°:
# * Unidirectional 45°:
#     * Bidirectional ±45°: [HiMax™ FCIM151](https://www.hexcel.com/user_area/content_media/raw/FCIM151.pdf)
# * Epoxy resin system: [HexPly® M35-4](https://www.hexcel.com/user_area/content_media/raw/HexPly_M354_DataSheet.pdf)
# * **Fibre volume fraction ($V_{f}$, %)**: 55
# * **Test speed (mm/min)**: 1
# * **No. of samples per test**: 3
#
# ### 3.1.2 Specimen-specific properties
# The following table details the specimens to be tested for the investigation:
#
# **<center>Table 1. Set of carbon fibre/epoxy laminate coupons for quasi-static fatigue testing</center>**
#
# | Coupon tag | Direction | Orientation [°] | Loading | No. of Layers | Avg. Coupon Width [mm] | Avg. Coupon Thickness [mm] |
# |:----------:|:---------:|:---------------:|:-----------:|:-------------:|:----------------------:|:--------------------------:|
# | UD_0_4_T | UD | 0 | Tension | 4 | 9.98 | 1.02 |
# | UD_90_8_T | UD | 90 | Tension | 8 | 20.02 | 1.98 |
# | BD_±45_8_T | BD | ±45 | Tension | 8 | 20.1 | 1.95 |
# | UD_45_8_T | UD | 45 | Tension | 8 | 20.06 | 2.01 |
# | UD_0_4_C | UD | 0 | Compression | 4 | 9.98 | 1.01 |
# | UD_90_8_C | UD | 90 | Compression | 8 | 19.98 | 2.02 |
#
# As a reference, ply (laminate) layers were layed-up according to the following convention:
#
# 
#
# *<center> Fig. Y - UD lamina lay-up orientations (JKU Linz - IPPE, 2017)</center>*
#
#
# ### 3.1.3 References
# [1] *<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Modelling and validation of fatigue life calculation method for short fiber reinforced injection molded parts. In 16th European conference of composite materials.* [Link](http://www.escm.eu.org/eccm16/assets/0329.pdf)
#
# [2] *<NAME>. (1999). Mechanics of composite materials. 2nd Ed. CRC press.*
# ## 3.2 Carbon fibre/epoxy test coupon fabrication
# 1. Carbon/epoxy specimens with 55% fibre volume fraction were produced with the following materials:
#
# * **Epoxy resin and system**: HexPly® M35-4
# * **Carbon fibres**: HiMax™ FCIM151
# * **Other**: Epoxy-compatible binder was used to make handling the layup of carbon fibre sheets easier and to prevent distortion during the layup manufacturing
# <br>
#
#
# 2. The specimen laminates were produced according to the cure cycle protocol indicated in the HexPly® M35-4 technical specification, with:
# * the cure temperature set at $100 \pm 2.5°C$
# * cure processing time set at 4.5 hours
# * heat-up and cool-down rates set at 1°C/minute, vacuum applied at -0.85 bar
# * autoclave pressure set to 7.5 bar-g.
# <br>
#
# 3. Unidirectional (UD) carbon/epoxy specimens were milled from plates with diamond blades at angles of 0°, 45°/±45° and 90°
# <br>
#
# 4. Specimen geometries for mechanical tests with carbon/epoxy specimens were chosen according to the following specifications:
# * Rectangular specimens (especially for UD)
# * Tabs for load introduction
#     * Tab material had to possess a lower stiffness than the tested materials; since CF/epoxy composite coupons were being tested, aluminum tabs were used for fabrication
# <br>
#
# The following figure shows the dimensions used to prepare the composite coupons for testing:
#
# 
#
# *<center> Fig. 3.1 - Test coupon geometries for UD quasi-isotropic and 0° (JKU Linz - IPPE, 2017)</center>*
#
# 4-ply UD specimens had the following geometry:
# * $200 \pm 0.1 mm \quad\quad \times \quad\quad 10 \pm 0.025 mm \quad\quad \times \quad\quad 1 \pm 0.025 mm$ for UD 0° specimens
#
# 8-ply UD specimens had the following geometry:
# * $200 \pm 0.1 mm \quad\quad \times \quad\quad 20 \pm 0.1 mm \quad\quad \times \quad\quad 2 \pm 0.025 mm$ for 8-ply/off-axis specimens
#
# * Aluminium tabs with 1.5 mm thickness were adhered on both sides of all carbon/epoxy specimens (for tensile loads, a 1 mm thickness is usually chosen for specimens tested in the fibre direction)
# ## 3.3 Experimental equipment and data reduction process
# ### 3.3.1 Data analysis
# For data evaluation, all moduli and strengths were calculated with the real cross-sections of the respective tested specimens. Moduli were evaluated between 0.001 and 0.003 absolute strain, as per:
# * ASTM D3039/D3039M: Standard test method for tensile properties of polymer matrix composite materials
#
# * ASTM D3410 / D3410M: Standard Test Method for Compressive Properties of Polymer Matrix Composite Materials
#
# * ASTM_E111-04: Standard Test Method for Young’s Modulus, Tangent Modulus, and Chord Modulus
#
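# As a hedged illustration only (not part of the original notebook code), a chord modulus
# between the two strain limits could be computed from a load–strain record as sketched
# below; the column names 'load_N' and 'strain', and the helper itself, are assumptions,
# and the actual evaluation used the measured cross-sections as per ASTM E111.
def chord_modulus_sketch(coupon_df, width_mm, thickness_mm, eps_lo=0.001, eps_hi=0.003):
    """Hedged sketch: chord modulus [MPa] between two absolute strain levels.
    Assumes a pandas DataFrame with columns 'load_N' [N] and 'strain' (the recorded
    strain [%] converted to absolute strain, i.e. divided by 100)."""
    area_mm2 = width_mm * thickness_mm                   # real coupon cross-section
    stress_mpa = coupon_df['load_N'] / area_mm2          # N/mm^2 == MPa
    lo = (coupon_df['strain'] - eps_lo).abs().idxmin()   # reading closest to 0.001
    hi = (coupon_df['strain'] - eps_hi).abs().idxmin()   # reading closest to 0.003
    d_sigma = stress_mpa.loc[hi] - stress_mpa.loc[lo]
    d_eps = coupon_df['strain'].loc[hi] - coupon_df['strain'].loc[lo]
    return d_sigma / d_eps                               # chord modulus, MPa
#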
# ### 3.3.2 Equipment
# * All mechanical tests were performed on a Zwick-Roell HA 100 kN servo-hydraulic fatigue test machine designed for loads up to 100 kN at room temperature
#
# ### 3.3.3 Quasi-static tension and compression tests
# * In quasi-static tension and compression tests, specimens were loaded in a displacement controlled way with a test speed of 1 mm/min
# * End-tabs were clamped completely between the Zwick-Roell HA system grips
# * Strains in longitudinal direction were recorded by means of a proprietary digital sensor setup (JKU Linz IPMT)
# * The experiment runs were designed to investigate the in-plane tensile and compressive properties of polymer matrix composite materials reinforced by high-modulus fibers (in this case, carbon fibre/epoxy laminate composites). The applicability of the ASTM test method is limited to continuous fiber or discontinuous fiber-reinforced composite material forms, in which the laminate is balanced and/or symmetric with respect to the test direction
# ## 3.4 Experimental data analyses - Python Preamble
# ### 3.4.1 Premable for python object-oriented programming
# In[40]:
##===============================IMAGES============================================================
#Image import preamble
import IPython
from IPython.display import display, Image, SVG, Math, YouTubeVideo
Image_PATH = "/Users/delroy_m/Desktop/(CMT) Materials Characterization ELN/0.Images/"
# Use 'image drag & drop' IPython Notebook Extension
#IPython.html.nbextensions.install_nbextension('https://raw.github.com/ipython-contrib/IPython-notebook-extensions/master/nbextensions/usability/dragdrop/main.js')
#Load 'image drag & drop' extension
#%javascript
#IPython.load_extensions('usability/dragdrop/main');
#NOTE about 'image drag & drop' extension handling of images
# The image will be uploaded to the server into the directory where your notebook resides. This means, the image is not copied into the notebook itself, it will only be linked to.
##===============================DATA ANALYSES=====================================================
#import PANDAS - A library providing high-performance, easy-to-use data structures and data analysis tools
import pandas as pd
#print("Current Pandas version:", pd.__version__)
# print("plotly version:", __version__)
#import SciPy - A Python-based ecosystem of open-source software for mathematics, science, and engineering
import scipy
from scipy import *
#Import Gaussian distribution STATS package to validate whether experimental data is randomly (normally)
#distributed
from scipy.stats import *
#from scipy.stats import norm
# if using a Jupyter notebook, include:
#%matplotlib inline
#import NumPy - A fundamental package for scientific computing with Python
import numpy as np
#import qgrid - Allows querying of DataFrames with intuitive scrolling, sorting, and filtering controls,
#as well as editing features, for the DataFrames, by double clicking cells
import qgrid
##===============================DATA VISUALIZATION================================================
#import matplotlib - A Python 2D plotting library
#import matplotlib.pyplot as plt
#import Pygal - A Python SVG Charts Creator
import pygal
#import Plotly for online or offline interactive plot rendering
#
#If using Plotly with online server:
#import plotly.plotly as py
#
#If using Plotly offline and saving code/graphs/images locally:
import plotly.graph_objs as go
import plotly as py
from plotly import __version__ #ensures that most up-to-date plotly pckg is being used
from plotly.offline import init_notebook_mode, plot, download_plotlyjs, iplot
import plotly.figure_factory as ff
from plotly import tools
#Improve Plotly figure render responsiveness
import plotly.io as pio
pio.renderers.default = 'iframe'
# #import cufflinks as cf
#import Seaborn - Statistical data visualization using Matplotlib
import seaborn as sns
#from matplotlylib import fig_to_plotly
#import Plotly express - A terse, consistent, high-level wrapper around Plotly.py for rapid data exploration and figure generation
#import plotly_express as px
#Put plotly environment in 'offline mode'
py.offline.init_notebook_mode(connected=True)
#Reinitialize Jupyter Notebook mode
init_notebook_mode()
#For 'online' plotting:
# Learn about API authentication here: https://plot.ly/pandas/getting-started
# Find your api_key here: https://plot.ly/settings/api
#Do I have the most up-to-date plotly package?
#print("Current Plotly version:", __version__)
##===============================SYSTEM COMMANDS====================================================
import glob
import sys
import datetime
import os
##===============================EXCEPTION HANDLING=================================================
#Ignore dataframe slicing copying warnings --> these are annoying, and issue is acknowledged
pd.options.mode.chained_assignment = None # default='warn'
#Mute any annoying compiling warnings that arise when running code
#import warnings
#warnings.filterwarnings("ignore")
# ### 3.4.2 Setup framework for parsing quasi-static fatigue experimental data into Python (Pandas) dataframes
# In[41]:
##===============================Create dataframe from experiment data================================
#Coupon cyclic fatigue testing datasets - formatted according to "<NAME> - Tidy Data"
#"<NAME> - Tidy Data" - http://vita.had.co.nz/papers/tidy-data.pdf
#1. Each variable forms a column
#2. Each observation forms a row
#3. Each type of observational unit forms a table
##----------------------------------------------------------------------------------------------------
##-Naming convention for experiment files-##
#
#[Fiber_direction]-[Orientation_degree]-[Tension/Compression]-[Fibre_type]-[Test_speed (mm/min)]-[Test_temp]...
#-[Strain_in_load_direction]-[#_of_specimens_tested]-[specimen_avg_width (mm)]-[specimen_avg_thickness (mm)].xlsx
#"Experiment data attribute tags
####----------------------------------------------------------------------------------------------------
# 1. Fiber_direction:
# - Unidirectional (UD): 0°, 90° --> Provides longitudinal stiffness
# - Bidirectional (BD): ±45° --> Provides torsional stiffness
# * Attribute_type = [Alpha]
#
# 2. Orientation (°): 0°, ±45°, 90°
# * Attribute_type = [Alphanumeric]
#
# 3. Tension/compression loading:
# - T: Tension
# - C: Compression
# * Attribute_type = [Alpha]
#
# 8. Strain-in-load direction (x, y, x &/OR y):
# - UD: ε,y
# - BD: ε,y &/OR ε,x
# * Attribute_type = [Alphanumeric]
#
# 9. No. of specimens tested (#):
# * Attribute_type = [Numeric]
#
# 10. Specimens avg. width (mm):
# * Attribute_type = [Numeric]
#
# 11. Specimens avg. thickness (mm):
# * Attribute_type = [Numeric]
#
#
#"Experiment data variables
####----------------------------------------------------------------------------------------------------
#Column 1:
# - Tension or compression load [N]
#
##Column 2:
# - Strain [%]
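# Purely as a hedged illustration (not in the original notebook), a file name following the
# convention above could be split into its attribute tags as sketched below; the example
# name, the field names and their order are assumptions based on the convention string.
def parse_coupon_filename_sketch(filename):
    """e.g. 'UD-0-T-CF-1-RT-y-3-9.98-1.02.xlsx' -> dict of attribute tags (illustrative only)."""
    import os
    stem = os.path.splitext(os.path.basename(filename))[0]
    fields = ['fiber_direction', 'orientation_deg', 'loading', 'fibre_type',
              'test_speed_mm_min', 'test_temp', 'strain_direction',
              'n_specimens', 'avg_width_mm', 'avg_thickness_mm']
    return dict(zip(fields, stem.split('-')))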
#Custom color palette for plotting
####----------------------------------------------------------------------------------------------------
dark_turquoise = '#00CED1'
turquoise = '#40E0D0'
medium_turquoise = '#48D1CC'
pale_turquoise = '#AFEEEE'
aqua_marine = '#7FFFD4'
powder_blue = '#B0E0E6'
cadet_blue = '#5F9EA0'
steel_blue = '#4682B4'
corn_flower_blue = '#6495ED'
deep_sky_blue = '#00BFFF'
dodger_blue = '#1E90FF'
light_blue = '#ADD8E6'
sky_blue = '#87CEEB'
light_sky_blue = '#87CEFA'
midnight_blue = '#191970'
navy = '#000080'
dark_blue = '#00008B'
medium_blue = '#0000CD'
blue = '#0000FF'
royal_blue = '#4169E1'
# ### 3.4.3 Parse quasi-static fatigue experimental data into data frame
# In[42]:
#Upload all 'cleaned' experimental data sets for composite coupon fatigue testing
##===============================DEFINE DATA DIRECTORY=============================================
#Data import from local server
#Used so that data files & code are not mixed together + makes it easy to change working
#directory to where data is stored
#Set desired directory path here
desired_dir = r"/Users/delroy_m/Desktop/(CMT) Materials Characterization ELN/2. Cleaned data/Quasi_static_data"
work_dirPath = os.chdir(desired_dir) #Set the current directory to the desired working directory path
verify_cwd_path = os.getcwd()
print("CWD: " + verify_cwd_path)
##===============================Import cleaned experiment data======================================
qsf_expt_data = glob.glob('*.xlsx') # Get all .xlsx files in the working directory set above.
qsf_expt_data
#Define DataFrame to store quasi-static fatigue .xlsx experiment files
qsf_df = pd.DataFrame()
#Enter test (lab) environment measurements for completeness of data parsing
T_test = 21.23
RH_test = 55.7
#Pandas 'read_excel' syntax
#pandas.read_excel(io, sheet_name=0, header=0, names=None, index_col=None, parse_cols=None,
# true_values=None, false_values=None, skiprows=None, nrows=None, na_values=None,
# keep_default_na=True, verbose=False, parse_dates=False, date_parser=None,
# thousands=None, comment=None, skip_footer=0, skipfooter=0, convert_float=True,
# mangle_dupe_cols=True, **kwds)
#loop to establish columns for DataFrame
for i, P in enumerate(qsf_expt_data): #i: counter, P: place holder
#print(P)
eqsf_df = | pd.read_excel(P, header=None) | pandas.read_excel |
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = | period_range('2007-01', periods=50) | pandas.period_range |
import pandas as pd
from utils_dr_pre_word_simi import *
import os
from utils import *
from transformers import *
from dataset import Dataset_dr
import torch
import numpy as np
TRAIN_DR = './con_rew_data/para/train.csv'
DEV_DR = './con_rew_data/para/dev.csv'
TEST_DR = './con_rew_data/para/test.csv'
fw = open('verb_no_simi.txt', 'w')
VER_MAG_RATE = 1.5
VER_ADD_VAL = 5
batchsize_dr = 4
device_dr = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
verb2simi = load_word2simi()
tokenizer_dr = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
token_dict_dr = {
'bos_token': '<start>',
'eos_token': '<end>',
'pad_token': '<pad>',
'cls_token': '<cls>',
'additional_special_tokens': ['<pos>', '<neg>', '<equal>', '<VERB>']
}
num_added_token_dr = tokenizer_dr.add_special_tokens(token_dict_dr)
print(tokenizer_dr.vocab_size)
cats = ['pos', 'neg', 'equal']
def agen_vector(tokenizer, num_added, multi=True):
agen_vectors = {}
for label, verbset in agen_v.items():
if multi:
vector = torch.ones(tokenizer.vocab_size + num_added)
else:
vector = torch.zeros(tokenizer.vocab_size + num_added)
for v in verbset:
forms = infi2allforms(v)
for form in forms:
v_li = tokenizer.encode(form)
if multi:
vector[v_li[0]] *= VER_MAG_RATE
else:
vector[v_li[0]] = VER_ADD_VAL
agen_vectors[label] = vector
return agen_vectors
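# Illustrative usage (not in the original script): build the per-label verb-boosting
# vectors once, using the GPT tokenizer and the number of added special tokens above, e.g.
#   agen_vectors_dr = agen_vector(tokenizer_dr, num_added_token_dr, multi=True)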
def infi2allforms(word):
res = []
row = verb_form[verb_form[0] == word]
if row.empty:
res.append(word)
return res
row = row.dropna(axis=1)
for col in row.columns:
res.append(row[col].iloc[0])
return res
def get_he_df(df):
df['cri'] = df['sen'].str.replace(' ', '')
df['cri'] = df['cri'].str.lower()
he_df = | pd.read_csv(ROC_TEST_HE) | pandas.read_csv |
#! -*- coding:utf-8 -*-
import os
import re
import gc
import sys
import json
import codecs
import random
import warnings
import numpy as np
import pandas as pd
import textdistance
from tqdm import tqdm
import tensorflow as tf
from random import choice
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import keras.backend as K
from keras.layers import *
from keras.callbacks import *
from keras.models import Model
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
tqdm.pandas()
seed = 2019
random.seed(seed)
tf.set_random_seed(seed)
np.random.seed(seed)
warnings.filterwarnings('ignore')
data_path = '../dataSet/'
train = pd.read_csv(data_path + 'Round2_train.csv', encoding='utf-8')
test = pd.read_csv(data_path + 'round2_test.csv', encoding='utf-8')
train_preliminary = | pd.read_csv(data_path + 'Train_Data.csv', encoding='utf-8') | pandas.read_csv |
import numpy as np
import pandas as pd
from tqdm import tqdm
import cv2
import pyfeats
from utils import Plaque
#%% Path & Name of Plaque
path = './data/'
labels = pd.read_excel(path+'labels.xlsx')
path_features = './results/features/'
IMG_NO = len(labels)
#%% Parameters
perc = 1 # Percentage of the plaque to take into consideration when calculating features in (0,1]
Dx = [0,1,1,1] # A. Early Textural - GLDS
Dy = [1,1,0,-1] # A. Early Textural - GLDS
d = 1 # A. Early Textural - NGTDM
Lr, Lc = 4, 4 # A. Early Textural - SFM
l = 7 # A. Early Textural - LTE
s = 4 # A. Early Textural - FDTA
th = [135,140] # A. Late Textural - HOS
P = [8,16,24] # A. Late Textural - LBP
R = [1,2,3] # A. Late Textural - LBP
N = 30 # B Morphology
bins_hist = 32 # C. Histogram - All
num_eros = 3 # C. Histogram - Multi-region Histogram
square_size = 3 # C. Histogram - Multi-region Histogram
wavelet_dwt = 'bior3.3' # D. Multi-Scale - DWT
wavelet_swt = 'bior3.3' # D. Multi-Scale - SWT
wavelet_wp = 'coif1' # D. Multi-Scale - WP
levels_dwt = 3 # D. Multi-Scale - DWT
levels_swt = 3 # D. Multi-Scale - SWT
levels_wp = 3 # D. Multi-Scale - WP
bins_digitize = 32 # C. Histogram - Correlogram
bins_hist_corr = 32 # C. Histogram - Correlogram
zernikes_radii = 9 # E. Other - Zernikes Moments
#%% Init arrays
names = []
# A. Textural
np_fos = np.zeros((IMG_NO,16), np.double)
np_glcm_mean = np.zeros((IMG_NO,14), np.double)
np_glcm_range = np.zeros((IMG_NO,14), np.double)
np_glds = np.zeros((IMG_NO,5), np.double)
np_ngtdm = np.zeros((IMG_NO,5), np.double)
np_sfm = np.zeros((IMG_NO,4), np.double)
np_lte = np.zeros((IMG_NO,6), np.double)
np_fdta = np.zeros((IMG_NO,s+1), np.double)
np_glrlm = np.zeros((IMG_NO,11), np.double)
np_fps = np.zeros((IMG_NO,2), np.double)
np_shape_parameters = np.zeros((IMG_NO,5), np.double)
np_hos = np.zeros((IMG_NO,len(th)), np.double)
np_lbp = np.zeros((IMG_NO,len(P)*2), np.double)
np_glszm = np.zeros((IMG_NO,14), np.double)
# B. Morphological
pdf_L = np.zeros((IMG_NO,N), np.double)
pdf_M = np.zeros((IMG_NO,N), np.double)
pdf_H = np.zeros((IMG_NO,N), np.double)
cdf_L = np.zeros((IMG_NO,N), np.double)
cdf_M = np.zeros((IMG_NO,N), np.double)
cdf_H = np.zeros((IMG_NO,N), np.double)
pdf_gray = np.zeros((IMG_NO,N), np.double)
cdf_gray = np.zeros((IMG_NO,N), np.double)
# C. Histogram
np_histogram = np.zeros((IMG_NO,bins_hist), np.double)
np_multiregion_histogram = np.zeros((IMG_NO,bins_hist*num_eros), np.double)
np_correlogram_d = np.zeros((IMG_NO,bins_digitize*bins_hist), np.double)
np_correlogram_th = np.zeros((IMG_NO,bins_digitize*bins_hist), np.double)
# D. Multi-Scale
np_dwt = np.zeros((IMG_NO,6*levels_dwt), np.double)
np_swt = np.zeros((IMG_NO,6*levels_swt), np.double)
np_wp = np.zeros((IMG_NO,(4**levels_wp-1)*2), np.double)
np_gt = np.zeros((IMG_NO,16), np.double)
np_amfm = np.zeros((IMG_NO,32*4), np.double)
# E. Other
np_hu = np.zeros((IMG_NO,7), np.double)
np_zernikes = np.zeros((IMG_NO,25), np.double)
#%% Calculate Features
progress = tqdm(range(0,IMG_NO), desc="Calculating Textural Features...")
for i in progress:
name = labels.iloc[i,0]
names.append(name)
# Load ultrasound
path_ultrasound = path + 'ultrasounds\\' + name + '.bmp'
ultrasound = cv2.imread(path_ultrasound, cv2.IMREAD_GRAYSCALE)
# Load points
path_points = path + 'points\\' + name + '_points.out'
points = np.loadtxt(path_points, delimiter=',')
points = np.array(points, np.int32)
# Load points near lumen
path_points_lumen = path + 'points_lumen\\' + name + '_points_lumen.out'
points_lumen = np.loadtxt(path_points_lumen, delimiter=',')
points_lumen = np.array(points_lumen, np.int32)
plaque = Plaque(ultrasound, points, points_lumen, name, pad=2)
plaque.mask = Plaque.get_perc_ROI(plaque.mask, plaque.perimeter_lumen, perc)
# A. Textural
progress.set_description('Calculating Early Textural Features' + ' for ' + name)
np_fos[i,:], labels_fos = pyfeats.fos(plaque.plaque, plaque.mask)
np_glcm_mean[i,:], np_glcm_range[i,:], labels_glcm_mean, labels_glcm_range = pyfeats.glcm_features(plaque.plaque, ignore_zeros=True)
np_glds[i,:], labels_glds = pyfeats.glds_features(plaque.plaque, plaque.mask, Dx=Dx, Dy=Dy)
np_ngtdm[i,:], labels_ngtdm = pyfeats.ngtdm_features(plaque.plaque, plaque.mask, d=d)
np_sfm[i,:], labels_sfm = pyfeats.sfm_features(plaque.plaque, plaque.mask, Lr=Lr, Lc=Lc)
np_lte[i,:], labels_lte = pyfeats.lte_measures(plaque.plaque, plaque.mask, l=l)
np_fdta[i,:], labels_fdta = pyfeats.fdta(plaque.plaque, plaque.mask, s=s)
np_glrlm[i,:], labels_glrlm = pyfeats.glrlm_features(plaque.plaque, plaque.mask, Ng=256)
np_fps[i,:], labels_fps = pyfeats.fps(plaque.plaque, plaque.mask)
np_shape_parameters[i,:], labels_shape_parameters = pyfeats.shape_parameters(plaque.plaque, plaque.mask, plaque.perimeter, pixels_per_mm2=1)
progress.set_description('Calculating Late Textural Features')
np_hos[i,:], labels_hos = pyfeats.hos_features(plaque.plaque, th=th)
np_lbp[i,:], labels_lbp = pyfeats.lbp_features(plaque.plaque, plaque.mask, P=P, R=R)
np_glszm[i,:], labels_glszm = pyfeats.glszm_features(plaque.plaque, plaque.mask)
# B. Morphological
progress.set_description('Calculating Morphological Features' + ' for ' + name)
pdf_gray[i,:], cdf_gray[i,:] = pyfeats.grayscale_morphology_features(plaque.plaque, N=N)
pdf_L[i,:], pdf_M[i,:], pdf_H[i,:], cdf_L[i,:], cdf_M[i,:], cdf_H[i,:] = \
pyfeats.multilevel_binary_morphology_features(plaque.plaque, plaque.mask, N=N)
# C. Histogram
progress.set_description('Calculating Histogram Features' + ' for ' + name)
np_histogram[i,:], labels_histogram = pyfeats.histogram(plaque.plaque, plaque.mask, bins_hist)
np_multiregion_histogram[i,:], labels_multiregion_histogram = pyfeats.multiregion_histogram(plaque.plaque, plaque.mask, bins=bins_hist, num_eros=num_eros,square_size=square_size)
np_correlogram_d[i,:], np_correlogram_th[i,:], labels_correlogram = pyfeats.correlogram(plaque.plaque, plaque.mask, bins_digitize=bins_digitize, bins_hist=bins_hist, flatten=True)
# D. Multi-Scale
progress.set_description('Calculating Multi-Scale Features' + ' for ' + name)
np_dwt[i,:], labels_dwt = pyfeats.dwt_features(plaque.plaque, plaque.mask, wavelet=wavelet_dwt, levels=levels_dwt)
np_swt[i,:], labels_swt = pyfeats.swt_features(plaque.plaque, plaque.mask, wavelet=wavelet_swt, levels=levels_swt)
np_wp[i,:], labels_wp = pyfeats.wp_features(plaque.plaque, plaque.mask, wavelet=wavelet_wp, maxlevel=levels_wp)
np_gt[i,:], labels_gt = pyfeats.gt_features(plaque.plaque, plaque.mask)
np_amfm[i,:], labels_amfm = pyfeats.amfm_features(plaque.plaque)
# E. Other
progress.set_description('Calculating Other Features' + ' for ' + name)
np_hu[i,:], labels_hu = pyfeats.hu_moments(plaque.plaque)
np_zernikes[i,:], labels_zernikes = pyfeats.zernikes_moments(plaque.plaque, zernikes_radii)
#%% Convert to pandas
# A. Early Textural
df_fos = pd.DataFrame(data=np_fos, index=names, columns=labels_fos)
df_glcm_mean = pd.DataFrame(data=np_glcm_mean, index=names, columns=labels_glcm_mean)
df_glcm_range = pd.DataFrame(data=np_glcm_range, index=names, columns=labels_glcm_range)
df_glds = pd.DataFrame(data=np_glds, index=names, columns=labels_glds)
df_ngtdm = pd.DataFrame(data=np_ngtdm, index=names, columns=labels_ngtdm)
df_sfm = pd.DataFrame(data=np_sfm, index=names, columns=labels_sfm)
df_lte = pd.DataFrame(data=np_lte, index=names, columns=labels_lte)
df_fdta = pd.DataFrame(data=np_fdta, index=names, columns=labels_fdta)
df_glrlm = | pd.DataFrame(data=np_glrlm, index=names, columns=labels_glrlm) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import io
import re
import demjson3
import pandas as pd
import requests
from zvt.api.utils import china_stock_code_to_id
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain import EtfStock, Etf
from zvt.recorders.consts import DEFAULT_SH_ETF_LIST_HEADER
from zvt.utils.time_utils import now_pd_timestamp
class ChinaETFListSpider(Recorder):
data_schema = EtfStock
def __init__(self, force_update=False, sleeping_time=10.0, provider='exchange') -> None:
self.provider = provider
super().__init__(force_update, sleeping_time)
def run(self):
# 抓取沪市 ETF 列表
url = 'http://query.sse.com.cn/commonQuery.do?sqlId=COMMON_SSE_ZQPZ_ETFLB_L_NEW'
response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)
response_dict = demjson3.decode(response.text)
df = pd.DataFrame(response_dict.get('result', []))
self.persist_etf_list(df, exchange='sh')
self.logger.info('沪市 ETF 列表抓取完成...')
# 抓取沪市 ETF 成分股
self.download_sh_etf_component(df)
self.logger.info('沪市 ETF 成分股抓取完成...')
# 抓取深市 ETF 列表
url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1945'
response = requests.get(url)
df = pd.read_excel(io.BytesIO(response.content), dtype=str)
self.persist_etf_list(df, exchange='sz')
self.logger.info('深市 ETF 列表抓取完成...')
# 抓取深市 ETF 成分股
self.download_sz_etf_component(df)
self.logger.info('深市 ETF 成分股抓取完成...')
def persist_etf_list(self, df: pd.DataFrame, exchange: str):
if df is None:
return
df = df.copy()
if exchange == 'sh':
df = df[['FUND_ID', 'FUND_NAME']]
elif exchange == 'sz':
df = df[['证券代码', '证券简称']]
df.columns = ['code', 'name']
df['id'] = df['code'].apply(lambda code: f'etf_{exchange}_{code}')
df['entity_id'] = df['id']
df['exchange'] = exchange
df['entity_type'] = 'etf'
df['category'] = 'etf'
df = df.dropna(axis=0, how='any')
df = df.drop_duplicates(subset='id', keep='last')
df_to_db(df=df, data_schema=Etf, provider=self.provider, force_update=False)
def download_sh_etf_component(self, df: pd.DataFrame):
"""
ETF_CLASS => 1. 单市场 ETF 2.跨市场 ETF 3. 跨境 ETF
5. 债券 ETF 6. 黄金 ETF
:param df: ETF 列表数据
:return: None
"""
query_url = 'http://query.sse.com.cn/infodisplay/queryConstituentStockInfo.do?' \
'isPagination=false&type={}&etfClass={}'
etf_df = df[(df['ETF_CLASS'] == '1') | (df['ETF_CLASS'] == '2')]
etf_df = self.populate_sh_etf_type(etf_df)
for _, etf in etf_df.iterrows():
url = query_url.format(etf['ETF_TYPE'], etf['ETF_CLASS'])
response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)
response_dict = demjson3.decode(response.text)
response_df = pd.DataFrame(response_dict.get('result', []))
etf_code = etf['FUND_ID']
etf_id = f'etf_sh_{etf_code}'
response_df = response_df[['instrumentId', 'instrumentName']].copy()
response_df.rename(columns={'instrumentId': 'stock_code', 'instrumentName': 'stock_name'}, inplace=True)
response_df['entity_id'] = etf_id
response_df['entity_type'] = 'etf'
response_df['exchange'] = 'sh'
response_df['code'] = etf_code
response_df['name'] = etf['FUND_NAME']
response_df['timestamp'] = now_pd_timestamp()
response_df['stock_id'] = response_df['stock_code'].apply(lambda code: china_stock_code_to_id(code))
response_df['id'] = response_df['stock_id'].apply(
lambda x: f'{etf_id}_{x}')
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
self.logger.info(f'{etf["FUND_NAME"]} - {etf_code} 成分股抓取完成...')
self.sleep()
def download_sz_etf_component(self, df: pd.DataFrame):
query_url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vII_NewestComponent/indexid/{}.phtml'
self.parse_sz_etf_underlying_index(df)
for _, etf in df.iterrows():
underlying_index = etf['拟合指数']
etf_code = etf['证券代码']
if len(underlying_index) == 0:
self.logger.info(f'{etf["证券简称"]} - {etf_code} 非 A 股市场指数,跳过...')
continue
url = query_url.format(underlying_index)
response = requests.get(url)
response.encoding = 'gbk'
try:
dfs = | pd.read_html(response.text, header=1) | pandas.read_html |
# coding=utf-8
"""New Credit Card Fraud Detection kernel.
Scaling and sub-sampling is being used.
"""
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import train_test_split, StratifiedKFold
import pandas as pd
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Dense, Activation, Dropout
from keras.utils import to_categorical
# Hyperparameters
EPOCHS = 30
VERBOSE = 2
OPTIMIZER = SGD()
BATCH_SIZE = 256
VERBOSE = False
def load_dataset(verbose=True):
print("Loading data...")
dataset = pd.read_csv("creditcard.csv")
if verbose:
print(dataset.head())
print(dataset.describe())
return dataset
def scale_dataset(dataset, verbose=True):
print("Scaling data...")
scaler = RobustScaler()
dataset['scl_amount'] = scaler.fit_transform(dataset['Amount'].values.reshape(-1, 1))
dataset['scl_time'] = scaler.fit_transform(dataset['Time'].values.reshape(-1, 1))
dataset.drop(['Time', 'Amount'], axis=1, inplace=True)
scl_amount = dataset['scl_amount']
scl_time = dataset['scl_time']
dataset.drop(['scl_amount', 'scl_time'], axis=1, inplace=True)
dataset.insert(0, 'Amount', scl_amount)
dataset.insert(0, 'Time', scl_time)
if verbose:
print(dataset.head())
print(dataset.describe())
return dataset
def split_dataset(dataset):
skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)
X = dataset.drop('Class', axis=1)
y = dataset['Class']
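    # note: each iteration overwrites the split variables, so only the last of the
    # five stratified folds is actually returned to the caller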
for train_i, test_i in skf.split(X, y):
oxtrain, oxtest = X.iloc[train_i], X.iloc[test_i]
oytrain, oytest = y.iloc[train_i], y.iloc[test_i]
oxtrain = oxtrain.values
oxtest = oxtest.values
oytrain = oytrain.values
oytest = oytest.values
return oxtrain, oxtest, oytrain, oytest
def subsample_dataset(dataset, verbose=True):
print("Subsampling dataset...")
dataset = dataset.sample(frac=1)
fraud_subsample = dataset.loc[dataset["Class"] == 1]
valid_subsample = dataset.loc[dataset["Class"] == 0][:fraud_subsample.shape[0]]
equal_subsample = | pd.concat([fraud_subsample, valid_subsample]) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
'''
bentoo-calltree-analyser.py - Bottleneck analysis based on calltree
This tool computes relative/absolute percentage for selected events based on
calltree structure.
'''
from __future__ import division
from builtins import map
from past.utils import old_div
import sqlite3
import argparse
import pandas
import fnmatch
import re
import json
import sys
def glob_strings(source, patterns):
if not source or not patterns:
return []
return [x for x in source for y in patterns if fnmatch.fnmatch(x, y)]
def quote(string):
return "\"%s\"" % string
def find_first_of(contents, candidates):
for c in candidates:
try:
i = contents.index(c)
except ValueError:
i = -1
if i >= 0:
return (c, i)
return (None, -1)
def split_columns(columns):
'''split 'columns' into (index_columns, data_columns)'''
timer_column_index = columns.index("TimerName")
return (columns[:timer_column_index+1], columns[timer_column_index+1:])
def extract_column_names(conn, table="result"):
orig_row_factory = conn.row_factory
conn.row_factory = sqlite3.Row
r = conn.execute("SELECT * FROM %s LIMIT 1" % table).fetchone()
names = list(r.keys())
conn.row_factory = orig_row_factory
return names
def extract_timer_names(calltree):
timers = set()
def visit_tree(node):
timers.add(node["id"])
for x in node["children"]:
visit_tree(x)
visit_tree(calltree)
return list(timers)
def build_parent_map(calltree):
parents = {}
def visit_tree(tree, top_level=False):
if top_level:
parents[tree["id"]] = None
for x in tree["children"]:
parents[x["id"]] = tree["id"]
visit_tree(x)
visit_tree(calltree, top_level=True)
return parents
def build_abs_seq_map(calltree):
result = {}
level = {}
seq_obj = {"seq": 0}
def visit_tree(tree, curr_level=0):
level[tree["id"]] = curr_level
result[tree["id"]] = seq_obj["seq"]
seq_obj["seq"] += 1
for i, x in enumerate(tree["children"]):
visit_tree(x, curr_level+1)
visit_tree(calltree)
return (result, level)
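# For orientation, a minimal calltree of the shape these helpers expect: each node carries
# an "id" (the timer name) and a list of "children". The concrete timer names below are
# made up for illustration only:
#
#   {"id": "main", "children": [
#       {"id": "setup", "children": []},
#       {"id": "solve", "children": [{"id": "assemble", "children": []}]}]}
#
# build_parent_map on that tree yields
#   {"main": None, "setup": "main", "solve": "main", "assemble": "solve"}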
def compute_percentage(ref_db, calltree_file, out_db,
columns=None, append=None, treelize_timer_name=False):
conn0 = sqlite3.connect(ref_db)
ref_columns = extract_column_names(conn0)
index_columns, data_columns = split_columns(ref_columns)
if columns:
for x in columns:
assert(x in data_columns)
data_columns = list(columns)
append_columns = []
if append:
append_columns.extend(append)
timer_column = find_first_of(ref_columns, ["TimerName", "Name"])[0]
if not timer_column:
raise ValueError("Can not find timer column")
index_columns.remove(timer_column)
data_columns.insert(0, timer_column)
    calltree = json.load(open(calltree_file))
timer_names = extract_timer_names(calltree)
sql = list(map(quote, index_columns + data_columns + append_columns))
sql = "SELECT %s FROM result WHERE " % ", ".join(sql)
sql += " OR ".join("%s = \"%s\"" % (timer_column, x) for x in timer_names)
sql += " ORDER BY %s" % ", ".join(map(quote, index_columns))
data = | pandas.read_sql_query(sql, conn0) | pandas.read_sql_query |
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
if start_date != None and end_date != None:
start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
elif start_date != None and end_date == None:
start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
elif start_date == None and end_date != None:
start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
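# For example (illustrative IDs only): sql_list_from_mvs_ids([101, 102]) returns
# "(101_1),(102_1)", the column-list string passed to proc_DataExport_GetDataByColumnList
# further below.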
def rename_mvs_id_column(col, names, types):
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = pd.read_sql(sql_query_coordinates, self.conn)
masts = masts.merge(coordinates, left_on='wmm_id', right_on='wmm_id')
masts.set_index(['Project', 'wmm_id', 'WMM_Latitude', 'WMM_Longitude', 'Type'], inplace=True)
masts.sort_index(inplace=True)
return masts
def mvs_ids(self):
masts = self.masts()
mvs_ids = masts.mvs_id.values.tolist()
return mvs_ids
def valid_signal_labels(self):
signal_type_query = '''
SELECT [MDVT_ID]
,[MDVT_Name]
FROM [M2D2_DB_BE].[dbo].[MDataValueType]'''
signal_types = pd.read_sql(signal_type_query, self.conn, index_col='MDVT_Name').MDVT_ID
return signal_types
def column_labels_for_masts(self):
masts = self.masts()
mvs_ids = masts.mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def column_labels_for_data_from_mvs_ids(self, data):
masts = self.masts()
names_map = pd.Series(index=masts.mvs_id.values, data=masts.Name.values).to_dict()
types = self.valid_signal_labels()
types.loc['FLAG'] = 'Flag'
types_map = pd.Series(index=types.values.astype(str), data=types.index.values).to_dict()
data = data.rename(lambda x: rename_mvs_id_column(x, names=names_map, types=types_map), axis=1)
return data
def column_labels_for_wmm_id(self, wmm_id):
masts = self.masts()
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def data_from_sensors_mvs_ids(self, mvs_ids, signal_type='AVG'):
'''Download sensor data from M2D2
:Parameters:
mvs_ids: int or list
Virtual sensor IDs (mvs_ids) in M2D2, can be singular
signal_type: str, default 'AVG' - NOT SUPPORTED AT THIS TIME
Signal type for download
For example: 'AVG', 'SD', 'MIN', 'MAX', 'GUST'
:Returns:
out: DataFrame with signal data from virtual sensor
'''
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
valid_mvs_ids = self.mvs_ids()
assert all([mvs_id in valid_mvs_ids for mvs_id in mvs_ids]), f'One of the following is not a valid mvs_id: {mvs_ids}'
mvs_ids_list = sql_list_from_mvs_ids(mvs_ids)
sql_query= f"""
SET NOCOUNT ON
DECLARE @ColumnListID NVARCHAR(4000)
,@startDate DATETIME2
,@endDate DATETIME2
SET @ColumnListID= '{mvs_ids_list}'
SET @startDate = NULL
SET @endDate = NULL
EXECUTE [dbo].[proc_DataExport_GetDataByColumnList]
@ColumnListID
,@startDate
,@endDate
"""
data = pd.read_sql(sql_query, self.conn, index_col='CorrectedTimestamp')
data.index.name = 'stamp'
data.columns.name = 'sensor'
data = self.column_labels_for_data_from_mvs_ids(data)
return data
def data_from_mast_wmm_id(self, wmm_id):
'''Download data from all sensors on a mast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with signal data from each virtual sensor on the mast
'''
masts = self.masts()
wmm_ids = masts.index.get_level_values('wmm_id').sort_values().unique().tolist()
assert wmm_id in wmm_ids, f'the following is not a valid wmm_id: {wmm_id}'
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.values.tolist()
data = self.data_from_sensors_mvs_ids(mvs_ids)
return data
def metadata_from_mast_wmm_id(self, wmm_id):
'''Download mast metadata from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with mast metadata
'''
sql_query= '''
SELECT [WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]
WHERE wmm_id = {}
'''.format(wmm_id)
mast_metadata = pd.read_sql(sql_query, self.conn)
return mast_metadata
def mast_from_wmm_id(self, wmm_id):
'''Download an.MetMast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: an.MetMast with data and metadata from M2D2
'''
print(f'Downloading Mast {wmm_id} from M2D2')
data = self.data_from_mast_wmm_id(wmm_id=wmm_id)
metadata = self.metadata_from_mast_wmm_id(wmm_id=wmm_id)
mast = an.MetMast(data=data,
name=wmm_id,
lat=metadata.WMM_Latitude[0],
lon=metadata.WMM_Longitude[0],
elev=metadata.WMM_Elevation[0])
return mast
def masts_from_project(self, project):
'''Download an.MetMasts from M2D2 for a given project
:Parameters:
project_name: str
Project name in M2D2
:Returns:
out: List of an.MetMasts with data and metadata from M2D2 for a given project
'''
masts = self.masts()
projects = masts.index.get_level_values('Project').unique().tolist()
        assert project in projects, f'Project {project} not found in M2D2'
wmm_ids = masts.loc[project,:].index.get_level_values('wmm_id').sort_values().unique().tolist()
masts = [self.mast_from_wmm_id(wmm_id) for wmm_id in wmm_ids]
return masts
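    # Illustrative usage (the project name is a placeholder, not a real M2D2 project):
    #   m2d2 = an.io.database.M2D2()
    #   project_masts = m2d2.masts_from_project('ExampleProject')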
# Define Turbine class
class Turbine(object):
'''Class to connect to EDF Wind Turbine database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is:
import anemoi as an
turb_db = an.io.database.Turbine()
:Parameters:
:Returns:
out: an.Turbine object connected to Turbine database
'''
self.database = 'Turbine'
server = '10.1.15.53'
db = 'Turbine_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except pyodbc.Error:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def metadata(self):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_turbines = '''
SELECT [TUR_Manufacturer]
,[TUR_RatedOutputkW]
,[TPC_MaxOutput]
,[TUR_RotorDia]
,[TUR_Model]
,[AllHubHeights]
,[TPC_DocumentDate]
,[TUR_ID]
,[IECClass]
,[TPG_ID]
,[TPG_Name]
,[TPC_ID]
,[TVR_VersionName]
,[TPC_dbalevel]
,[TPC_TIScenario]
,[TPC_BinType]
,[TTC_ID]
,[TRPMC_ID]
,[P_ID]
,[P_Name]
FROM [Turbine_DB_BE].[NodeEstimate].[AllPowerCurves]
WHERE TPC_Type = 'Manufacturer General Spec'
'''
turbines = pd.read_sql(sql_query_turbines, self.conn)
return turbines
def power_curve_from_tpc_id(self, tpc_id):
'''Get manufacturer power curve data for a given tpc_id'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_power_curve = '''
SELECT TPCD_AirDensity,
TPCD_WindSpeedBin,
TPCD_OutputKW
FROM TPCDETAILS
WHERE TPC_id = {} AND TPCD_IsDeleted = 0;
'''.format(tpc_id)
power_curve = pd.read_sql(sql_query_power_curve, self.conn)
return power_curve
def trust_curve_from_ttc_id(self, ttc_id):
'''Get manufacturer thrust curve data for a given ttc_id'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_thrust_curve = '''
SELECT TTCD_AirDensity,
TTCD_WindSpeedBin,
TTCD_ThrustValue
FROM TTCDETAILS
WHERE TTC_id = {} AND TTCD_IsDeleted = 0;
'''.format(ttc_id)
thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
return thrust_curve
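# Example usage sketch (illustrative only): pull the manufacturer power and thrust
# curves for the first turbine model in the metadata table. Column names follow the
# SELECT statements above; a VPN connection and a working ODBC driver are assumed.
def _example_turbine_curves():
    turb_db = Turbine()
    meta = turb_db.metadata()
    first_model = meta.iloc[0]
    power_curve = turb_db.power_curve_from_tpc_id(first_model.TPC_ID)
    thrust_curve = turb_db.trust_curve_from_ttc_id(first_model.TTC_ID)
    return power_curve, thrust_curve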
# Define Padre class
class Padre(object):
'''Class to connect to PRE Padre database
'''
def __init__(self, database='PADREScada', conn_str=None, conn=None, domino=False):
'''Data structure with both database name and connection string.
:Parameters:
database: string, default 'PADREScada'
Name of the padre database to connect to
conn_str: string, default None
SQL connection string needed to connect to the database
conn: object, default None
SQL connection object to database
'''
self.database = database
if self.database == 'PADREScada':
server = '10.1.106.44'
db = 'PADREScada'
elif self.database == 'PadrePI':
server = '10.1.106.44'
db = 'PADREScada'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str
try:
self.conn = pyodbc.connect(self.conn_str)
except pyodbc.Error:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def assets(self, project=None, turbines_only=False):
'''Returns:
DataFrame of all assets within Padre (optionally filtered to a project and/or turbines only)
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
sql_query_assets = '''
SELECT [AssetKey]
,Projects.[ProjectName]
,[AssetType]
,[AssetName]
,Turbines.[Latitude]
,Turbines.[Longitude]
,[elevation_mt]
FROM [PADREScada].[dbo].[Asset] as Turbines
WITH (NOLOCK)
INNER JOIN [PADREScada].[dbo].[Project] as Projects on Turbines.ProjectKey = Projects.ProjectKey
'''
assets = pd.read_sql(sql_query_assets, self.conn)
assets.set_index(['ProjectName', 'AssetName'], inplace=True)
assets.sort_index(axis=0, inplace=True)
if turbines_only:
assets = assets.loc[assets.AssetType == 'Turbine', :]
assets.drop('AssetType', axis=1, inplace=True)
if project is not None:
assets = assets.loc[project, :]
return assets
def operational_projects(self):
'''Returns:
DataFrame of all operational wind projects within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
padre_project_query = """
SELECT [ProjectKey]
,[ProjectName]
,[State]
,[NamePlateCapacity]
,[NumGenerators]
,[latitude]
,[longitude]
,[DateCOD]
FROM [PADREScada].[dbo].[Project]
WHERE technology = 'Wind'"""
projects = pd.read_sql(padre_project_query, self.conn)
projects.set_index('ProjectName', inplace=True)
return projects
def turbine_categorizations(self, category_type='EDF'):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
padre_category_query = """
SELECT [CategoryKey]
,[StringName]
FROM [PADREScada].[dbo].[Categories]
WHERE CategoryType = '%s'""" %category_type
categories = pd.read_sql(padre_category_query, self.conn)
categories.set_index('CategoryKey', inplace=True)
return categories
def QCd_turbine_data(self, asset_key):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbine data. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT [TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Ambient_Temperature]
,[IEC Category]
,[EDF Category]
,[Expected Power (kW)]
,[Expected Energy (kWh)]
,[EnergyDelta (kWh)]
,[EnergyDelta (MWh)]
FROM [PADREScada].[dbo].[vw_10mDataBI]
WITH (NOLOCK)
WHERE [assetkey] = %i''' %asset_key
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_data(self, asset_key, start_date=None, end_date=None):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbine data. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Nacelle_Direction]
,[Average_Blade_Pitch]
,[Minimum_Blade_Pitch]
,[Maximum_Blade_Pitch]
,[Average_Rotor_Speed]
,[Minimum_Rotor_Speed]
,[Maximum_Rotor_Speed]
,[Average_Ambient_Temperature]
,coalesce([IECStringKey_Manual]
,[IECStringKey_FF]
,[IECStringKey_Default]) IECKey
,coalesce([EDFStringKey_Manual]
,[EDFStringKey_FF]
,[EDFStringKey_Default]) EDFKey
,coalesce([State_and_Fault_Manual]
,[State_and_Fault_FF]
,[State_and_Fault]) State_and_Fault
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {} {}'''.format(asset_key, return_between_date_query_string(start_date, end_date))
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_expected_energy(self, asset_key):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve expected energy data. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Expected_Power_NTF]
,[Expected_Energy_NTF]
,[Expected_Power_RefMet]
,[Expected_Energy_RefMet]
,[Expected_Power_Uncorr]
,[Expected_Energy_Uncorr]
,[Expected_Power_DensCorr]
,[Expected_Energy_DensCorr]
,[Expected_Power_AvgMet]
,[Expected_Energy_AvgMet]
,[Expected_Power_ProxyWTGs]
,[Expected_Energy_ProxyWTGs]
,[Expected_Power_MPC]
,[Expected_Energy_MPC]
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {}'''.format(asset_key)
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def senvion_event_logs(self, project_id):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve event logs. Use anemoi.DataBase(database="Padre")')
sql_query = '''
SELECT [assetkey]
,[TimeStamp]
,[statuscode]
,[incomingphasingoutreset]
FROM [PADREScada].[dbo].[SenvionEventLog]
WHERE projectkey = {} and incomingphasingoutreset != 'Reset'
ORDER BY assetkey, TimeStamp
'''.format(project_id)
event_log = pd.read_sql(sql_query, self.conn)
return event_log
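# Example usage sketch (illustrative only): list a project's turbines and download
# the QC'd 10-minute data for the first one. The project name is a placeholder.
def _example_padre_turbine_data(project='ExampleProject'):
    padre_db = Padre()
    turbines = padre_db.assets(project=project, turbines_only=True)
    first_asset_key = int(turbines.AssetKey.iloc[0])
    return padre_db.QCd_turbine_data(first_asset_key)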
#!/usr/bin/python3
import sys
input_shortnames = sys.argv[1:-1:2]
input_quast_csvs = sys.argv[2:-1:2]
output_file = sys.argv[-1]
from os import path
import pandas
df = pandas.DataFrame(columns=["experiment", "x", "y"])
for shortname, quast_csv in zip(input_shortnames, input_quast_csvs):
frame = pandas.read_csv(quast_csv, names=["x", "y"])
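# Illustrative sketch of how each per-experiment frame could be folded into `df`
# (an assumed continuation; column names match the DataFrame declared before the loop):
#   frame["experiment"] = shortname
#   df = pandas.concat([df, frame], ignore_index=True)
# with a final `df.to_csv(output_file, index=False)` after the loop.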
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
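# Summary of the UniqueCombinations round trip exercised above (illustrative):
#   instance = UniqueCombinations(columns=['b', 'c'])
#   instance.fit(table_data)                        # learns separator '#' and joint column 'b#c'
#   transformed = instance.transform(table_data)    # 'b' and 'c' replaced by a UUID-valued 'b#c' column
#   restored = instance.reverse_transform(transformed)  # original 'b' and 'c' columns recovered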
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_column`` is the constrained column name plus a token, i.e. ``'a#'``
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_column`` is both column names joined by a token, i.e. ``'a#b'``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
The levels converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
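# Illustrative example of _convert_levels (exact Index classes depend on the
# installed pandas version):
#   _convert_levels([5, 'spam', None])
#   -> [Index([5]), Index(['spam']), Index([])]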
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
        assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
        assert_frame_equal(targ, res2)
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019 - 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides functions to prepare the IMPAC dataset. IMPAC stands for
IMaging-PsychiAtry Challenge: predicting autism which is a data challenge on
Autism Spectrum Disorder detection:
https://paris-saclay-cds.github.io/autism_challenge.
"""
# Imports
import re
import os
import json
import glob
import shutil
import logging
import requests
import zipfile
import hashlib
import warnings
from collections import namedtuple
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from pynet.datasets import Fetchers
try:
from nilearn.connectome import ConnectivityMeasure
except ImportError:
warnings.warn("You need to install nilearn.")
# Global parameters
ATLAS = ("basc064", "basc122", "basc197", "craddock_scorr_mean",
"harvard_oxford_cort_prob_2mm", "msdl", "power_2011")
ARCHIVE = {
atlas: 'https://zenodo.org/record/3625740/files/{0}.zip'.format(atlas)
for atlas in ATLAS}
CHECKSUM = {
"basc064":
"75eb5ee72344d11f056551310a470d00227fac3e87b7205196f77042fcd434d0",
"basc122":
"2d0d2c2338f9114877a0a1eb695e73f04fc664065d1fb75cff8d59f6516b0ec7",
"basc197":
"68135bb8e89b5b3653e843745d8e5d0e92876a5536654eaeb9729c9a52ab00e9",
"craddock_scorr_mean":
"634e0bb07beaae033a0f1615aa885ba4cb67788d4a6e472fd432a1226e01b49b",
"harvard_oxford_cort_prob_2mm":
"638559dc4c7de25575edc02e58404c3f2600556239888cbd2e5887316def0e74",
"msdl":
"fd241bd66183d5fc7bdf9a115d7aeb9a5fecff5801cd15a4e5aed72612916a97",
"power_2011":
"d1e3cd8eaa867079fe6b24dfaee08bd3b2d9e0ebbd806a2a982db5407328990a"}
URL = "https://raw.githubusercontent.com/ramp-kits/autism/master/data/"
URLS = [URL + name for name in ["anatomy.csv", "anatomy_qc.csv",
"fmri_filename.csv", "fmri_qc.csv",
"fmri_repetition_time.csv",
"participants.csv", "test.csv", "train.csv"]]
Item = namedtuple("Item", ["input_path", "output_path", "metadata_path",
"labels", "nb_features"])
logger = logging.getLogger("pynet")
def _sha256(path):
""" Calculate the sha256 hash of the file at path.
"""
sha256hash = hashlib.sha256()
chunk_size = 8192
with open(path, "rb") as f:
while True:
buffer = f.read(chunk_size)
if not buffer:
break
sha256hash.update(buffer)
return sha256hash.hexdigest()
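# --- Hedged usage sketch (added for illustration, not part of the original
# module): _sha256 pairs with the CHECKSUM table above to verify a downloaded
# archive; the zip file name used here is a hypothetical example.
def _demo_verify_archive(zip_file="basc122.zip", atlas="basc122"):
    """Return True if the local archive matches the published checksum."""
    return _sha256(zip_file) == CHECKSUM[atlas]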
def _check_and_unzip(zip_file, atlas, atlas_directory):
checksum_download = _sha256(zip_file)
if checksum_download != CHECKSUM[atlas]:
os.remove(zip_file)
raise IOError("The file downloaded was corrupted. Try again "
"to execute this fetcher.")
logger.info("Decompressing the archive...")
zip_ref = zipfile.ZipFile(zip_file, "r")
zip_ref.extractall(atlas_directory)
zip_ref.close()
def _download_fmri_data(atlas, outdir):
logger.info("Downloading the data from {0}...".format(ARCHIVE[atlas]))
zip_file = os.path.join(outdir, atlas + ".zip")
if os.path.isfile(zip_file):
logger.info("'{0}' already downloaded!".format(zip_file))
else:
response = requests.get(ARCHIVE[atlas])
with open(zip_file, "wb") as of:
of.write(response.content)
atlas_directory = os.path.join(outdir, "data", "fmri")
if not os.path.isdir(atlas_directory):
_check_and_unzip(zip_file, atlas, atlas_directory)
def fetch_fmri_time_series(outdir, atlas="all"):
""" Fetch the time-series extracted from the fMRI data using a specific
atlas.
Parameters
----------
outdir: str
        the destination folder.
atlas : string, default='all'
The name of the atlas used during the extraction. The possibilities
are:
* `'basc064`, `'basc122'`, `'basc197'`: BASC parcellations with 64,
122, and 197 regions [1]_;
* `'craddock_scorr_mean'`: Ncuts parcellations [2]_;
* `'harvard_oxford_cort_prob_2mm'`: Harvard-Oxford anatomical
parcellations;
* `'msdl'`: MSDL functional atlas [3]_;
* `'power_2011'`: Power atlas [4]_.
References
----------
.. [1] <NAME>, et al. "Multi-level bootstrap analysis of stable
clusters in resting-state fMRI." Neuroimage 51.3 (2010): 1126-1139.
.. [2] <NAME>, et al. "A whole brain fMRI atlas generated
via spatially constrained spectral clustering." Human brain mapping
33.8 (2012): 1914-1928.
.. [3] <NAME>, et al. "Multi-subject dictionary learning to
segment an atlas of brain spontaneous activity." Biennial International
Conference on Information Processing in Medical Imaging. Springer,
Berlin, Heidelberg, 2011.
.. [4] Power, <NAME>., et al. "Functional network organization of the
human brain." Neuron 72.4 (2011): 665-678.
"""
if atlas == "all":
for single_atlas in ATLAS:
_download_fmri_data(single_atlas, outdir)
elif atlas in ATLAS:
_download_fmri_data(atlas, outdir)
else:
raise ValueError(
"'atlas' should be one of {0}. Got {1} instead.".format(
ATLAS, atlas))
logger.info("Downloading completed...")
def _load_fmri(fmri_filenames):
""" Load time-series extracted from the fMRI using a specific atlas.
"""
return np.array([pd.read_csv(subject_filename,
header=None).values
for subject_filename in fmri_filenames])
class FeatureExtractor(BaseEstimator, TransformerMixin):
""" Make a transformer which will load the time series and compute the
connectome matrix.
"""
def __init__(self):
self.transformer_fmri = make_pipeline(
FunctionTransformer(func=_load_fmri, validate=False),
ConnectivityMeasure(kind="tangent", vectorize=True))
def fit(self, X_df, y, datadir):
fmri_filenames = [path.replace(".", datadir, 1)
for path in X_df["fmri_basc122"]]
self.transformer_fmri.fit(fmri_filenames, y)
return self
def transform(self, X_df, datadir):
fmri_filenames = [path.replace(".", datadir, 1)
for path in X_df["fmri_basc122"]]
X_connectome = self.transformer_fmri.transform(fmri_filenames)
X_connectome = pd.DataFrame(X_connectome, index=X_df.index)
X_connectome.columns = ["connectome_{0}".format(i)
for i in range(X_connectome.columns.size)]
X_anatomy = X_df[[col for col in X_df.columns
if col.startswith("anatomy")]]
X_anatomy = X_anatomy.drop(columns="anatomy_select")
logger.debug(" X connectome: {0}".format(X_connectome.shape))
logger.debug(" X anatomy: {0}".format(X_anatomy.shape))
return pd.concat([X_connectome, X_anatomy], axis=1)
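# --- Hedged usage sketch (added for illustration, not part of the original
# module): FeatureExtractor expects a dataframe with an 'fmri_basc122' column
# of time-series CSV paths plus 'anatomy_*' columns; X_df, y and datadir below
# are placeholders for the example.
def _demo_feature_extractor(X_df, y, datadir):
    extractor = FeatureExtractor()
    extractor.fit(X_df, y, datadir)
    # Returns the vectorized tangent connectomes concatenated with anatomy columns
    return extractor.transform(X_df, datadir)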
@Fetchers.register
def fetch_impac(datasetdir, mode="train", dtype="all"):
""" Fetch/prepare the IMPAC dataset for pynet.
To compute the functional connectivity using the rfMRI data, we use the
BASC atlas with 122 ROIs.
Parameters
----------
datasetdir: str
the dataset destination folder.
mode: str
ask the 'train' or 'test' dataset.
dtype: str, default 'all'
the features type: 'anatomy', 'fmri', or 'all'.
Returns
-------
item: namedtuple
a named tuple containing 'input_path', 'output_path', and
'metadata_path'.
"""
logger.info("Loading impac dataset.")
if not os.path.isdir(datasetdir):
os.mkdir(datasetdir)
train_desc_path = os.path.join(datasetdir, "pynet_impac_train.tsv")
selected_input_path = os.path.join(
datasetdir, "pynet_impac_inputs_selection.npy")
train_input_path = os.path.join(
datasetdir, "pynet_impac_inputs_train.npy")
train_output_path = os.path.join(
datasetdir, "pynet_impac_outputs_train.npy")
test_desc_path = os.path.join(datasetdir, "pynet_impac_test.tsv")
test_input_path = os.path.join(
datasetdir, "pynet_impac_inputs_test.npy")
test_output_path = os.path.join(
datasetdir, "pynet_impac_outputs_test.npy")
if not os.path.isfile(train_desc_path):
fetch_fmri_time_series(datasetdir, atlas="basc122")
data = []
sets = {}
for url in URLS:
basename = url.split("/")[-1]
name = basename.split(".")[0]
local_file = os.path.join(datasetdir, basename)
if not os.path.isfile(local_file):
response = requests.get(url, stream=True)
with open(local_file, "wt") as out_file:
out_file.write(response.text)
del response
else:
logger.info("'{0}' already downloaded!".format(basename))
if name not in ("train", "test"):
prefix = name.split("_")[0]
                df = pd.read_csv(local_file, index_col=0)
# -*- coding: utf-8 -*-
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Module containing utility constants, functions and classes.
"""
import logging
import math
import pprint
import sys
from collections import deque
from collections.abc import MutableMapping
from copy import copy
from datetime import datetime
from functools import wraps
from inspect import signature
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.gridspec import GridSpec
from scipy import sparse
from shap import Explainer
from sklearn.inspection._partial_dependence import (
_grid_from_X, _partial_dependence_brute,
)
from sklearn.metrics import (
SCORERS, confusion_matrix, make_scorer, matthews_corrcoef,
)
from sklearn.utils import _print_elapsed_time, _safe_indexing
# Global constants ================================================= >>
SEQUENCE = (list, tuple, np.ndarray, pd.Series)
# Variable types
INT = Union[int, np.integer]
FLOAT = Union[float, np.float]
SCALAR = Union[INT, FLOAT]
SEQUENCE_TYPES = Union[SEQUENCE]
X_TYPES = Union[iter, dict, list, tuple, np.ndarray, sparse.spmatrix, pd.DataFrame]
Y_TYPES = Union[INT, str, SEQUENCE_TYPES]
# Attributes shared between atom and a pd.DataFrame
DF_ATTRS = (
"size",
"head",
"tail",
"loc",
"iloc",
"describe",
"iterrows",
"dtypes",
"at",
"iat",
"memory_usage",
"empty",
"ndim",
)
# List of custom metrics for the evaluate method
CUSTOM_METRICS = (
"cm",
"tn",
"fp",
"fn",
"tp",
"lift",
"fpr",
"tpr",
"fnr",
"tnr",
"sup",
)
# Acronyms for some common scorers
SCORERS_ACRONYMS = dict(
ap="average_precision",
ba="balanced_accuracy",
auc="roc_auc",
logloss="neg_log_loss",
ev="explained_variance",
me="max_error",
mae="neg_mean_absolute_error",
mse="neg_mean_squared_error",
rmse="neg_root_mean_squared_error",
msle="neg_mean_squared_log_error",
mape="neg_mean_absolute_percentage_error",
medae="neg_median_absolute_error",
poisson="neg_mean_poisson_deviance",
gamma="neg_mean_gamma_deviance",
)
# Functions ======================================================== >>
def flt(item):
"""Utility to reduce sequences with just one item."""
if isinstance(item, SEQUENCE) and len(item) == 1:
return item[0]
else:
return item
def lst(item):
"""Utility used to make sure an item is iterable."""
if isinstance(item, (dict, CustomDict, *SEQUENCE)):
return item
else:
return [item]
def it(item):
"""Utility to convert rounded floats to int."""
try:
is_equal = int(item) == float(item)
except ValueError: # Item may not be numerical
return item
return int(item) if is_equal else float(item)
def divide(a, b):
"""Divide two numbers and return 0 if division by zero."""
return np.divide(a, b) if b != 0 else 0
def merge(X, y):
"""Merge a pd.DataFrame and pd.Series into one dataframe."""
return X.merge(y.to_frame(), left_index=True, right_index=True)
def variable_return(X, y):
"""Return one or two arguments depending on which is None."""
if y is None:
return X
elif X is None:
return y
else:
return X, y
def is_multidim(df):
"""Check if the dataframe contains a multidimensional column."""
return df.columns[0] == "multidim feature" and len(df.columns) <= 2
def is_sparse(df):
"""Check if the dataframe contains any sparse columns."""
    return any(pd.api.types.is_sparse(df[col]) for col in df.columns)
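# --- Hedged usage sketch (added for illustration, not part of the original
# module): how the small helpers above combine on a made-up frame and target.
def _demo_utils():
    X = pd.DataFrame({"x1": [1, 2, 3]})
    y = pd.Series([0, 1, 0], name="target")
    data = merge(X, y)                       # one dataframe, target as last column
    assert flt([5]) == 5 and lst(5) == [5]   # reduce/expand single items
    assert it(3.0) == 3 and divide(1, 0) == 0
    assert not is_sparse(data) and not is_multidim(data)
    full_name = SCORERS_ACRONYMS.get("rmse")  # -> "neg_root_mean_squared_error"
    return variable_return(data, None), full_name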
import streamlit as st
import pandas as pd
from simpletransformers.question_answering import QuestionAnsweringModel
from simpletransformers.streamlit.streamlit_utils import get, simple_transformers_model
QA_ANSWER_WRAPPER = """{} <span style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 0.25rem; background: #a6e22d">{}</span> {}""" # noqa
QA_EMPTY_ANSWER_WRAPPER = """{} <span style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 0.25rem; background: #FF0000">{}</span> {}""" # noqa
def get_states(model, session_state=None):
if session_state:
setattr(session_state, "max_answer_length", model.args.max_answer_length)
setattr(session_state, "max_query_length", model.args.max_query_length)
else:
session_state = get(
max_seq_length=model.args.max_seq_length,
max_answer_length=model.args.max_answer_length,
max_query_length=model.args.max_query_length,
)
model.args.max_seq_length = session_state.max_seq_length
model.args.max_answer_length = session_state.max_answer_length
model.args.max_query_length = session_state.max_query_length
return session_state, model
@st.cache(hash_funcs={QuestionAnsweringModel: simple_transformers_model})
def get_prediction(model, context_text, question_text):
to_predict = [{"context": context_text, "qas": [{"id": 0, "question": question_text}]}]
answers, probabilities = model.predict(to_predict)
return answers, probabilities
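# --- Hedged usage sketch (added for illustration, not part of the original
# module): calling get_prediction outside the Streamlit viewer. The model
# directory is an assumption; the way answers/probabilities are unpacked
# mirrors how qa_viewer reads them below.
def _demo_get_prediction(model_dir="outputs/best_model"):
    model = QuestionAnsweringModel("bert", model_dir, use_cuda=False)
    context = "Simple Transformers wraps Hugging Face Transformers models."
    question = "What does Simple Transformers wrap?"
    answers, probabilities = get_prediction(model, context, question)
    best = answers[0]["answer"][0]                   # top answer string
    confidence = probabilities[0]["probability"][0]  # its confidence
    return best, confidence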
def qa_viewer(model):
st.sidebar.subheader("Parameters")
try:
session_state, model = get_states(model)
except AttributeError:
session_state = get(
max_seq_length=model.args.max_seq_length,
max_answer_length=model.args.max_answer_length,
max_query_length=model.args.max_query_length,
)
session_state, model = get_states(model, session_state)
model.args.max_seq_length = st.sidebar.slider(
"Max Seq Length", min_value=1, max_value=512, value=model.args.max_seq_length
)
model.args.max_answer_length = st.sidebar.slider(
"Max Answer Length", min_value=1, max_value=512, value=model.args.max_answer_length
)
model.args.max_query_length = st.sidebar.slider(
"Max Query Length", min_value=1, max_value=512, value=model.args.max_query_length
)
model.args.n_best_size = st.sidebar.slider("Number of answers to generate", min_value=1, max_value=20)
st.subheader("Enter context: ")
context_text = st.text_area("", key="context")
st.subheader("Enter question: ")
question_text = st.text_area("", key="question")
if context_text and question_text:
answers, probabilities = get_prediction(model, context_text, question_text)
st.subheader(f"Predictions")
answers = answers[0]["answer"]
context_pieces = context_text.split(answers[0])
if answers[0] != "empty":
if len(context_pieces) == 2:
st.write(
QA_ANSWER_WRAPPER.format(context_pieces[0], answers[0], context_pieces[-1]), unsafe_allow_html=True
)
else:
st.write(
QA_ANSWER_WRAPPER.format(context_pieces[0], answers[0], answers[0].join(context_pieces[1:])),
unsafe_allow_html=True,
)
else:
st.write(QA_EMPTY_ANSWER_WRAPPER.format("", answers[0], ""), unsafe_allow_html=True)
probabilities = probabilities[0]["probability"]
st.subheader("Confidence")
        output_df = pd.DataFrame({"Answer": answers, "Confidence": probabilities})
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).drop_duplicates().dropna()
# Drop not needed features
df.drop(['id'], axis=1, inplace=True)
df.drop(['lat'], axis=1, inplace=True)
df.drop(['long'], axis=1, inplace=True)
# Edit date to datetime pandas
df['date'] = pd.to_datetime(df['date'], format="%Y%m%dT%f", errors='coerce')
# only positive numbers
lst = ["price", "sqft_living", "sqft_lot", "sqft_above", "yr_built",
"sqft_living15", "sqft_lot15", "bathrooms",
"floors"]
for feature in lst:
df = df[df[feature] > 0]
# checks where there is a basement
df['has_basement'] = np.where(df['sqft_basement'] > 0, 1, 0)
# renovated in the last 10 years
df['new_renovation'] = np.where(pd.DatetimeIndex(df['date']).year - df['yr_renovated'] < 10, 1, 0)
# drop date
df.drop(['date'], axis=1, inplace=True)
# Edit Zip-code to dummies
    df = pd.get_dummies(df, columns=['zipcode'])
import pandas as pd
import abc
import numpy as np
from BPMN.TransformationStrategy import SelectRowsStrategy
# abstract base class
class CombineStrategy():
    @abc.abstractmethod
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
pass
    @abc.abstractmethod
def get_code(self, df_1: str, df_2: str) -> str:
pass
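# Every concrete CombineStrategy below pairs an executable pandas operation
# (combine) with a string template that reproduces the same operation as
# source code (get_code). A minimal usage sketch -- the column names here are
# made up purely for illustration:
#
#   left = pd.DataFrame({"id": [1, 2], "a": ["x", "y"]})
#   right = pd.DataFrame({"id": [2, 3], "b": ["u", "v"]})
#   strategy = JoinOnStrategy("id")
#   joined = strategy.combine(left, right)      # inner join on "id"
#   print(strategy.get_code("left", "right"))   # emits equivalent pandas code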
# joins
class NaturalJoinStrategy(CombineStrategy):
def __init__(self, impl_bool_1: bool = False) -> None:
        super().__init__()
self.index = impl_bool_1
self.how = "inner"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
        if self.index:
return df_1.join(df_2, how=self.how)
return df_1.merge(df_2, how=self.how)
def get_code(self, df_1: str, df_2: str) -> str:
if self.index:
return f"#join on indcies\n{df_1}.join({df_2}, how = '{self.how}')"
return f"#natural join on columns \n{df_1}.merge({df_2}, how = '{self.how}')"
class LeftNaturalJoinStrategy(NaturalJoinStrategy):
def __init__(self, impl_bool_1: bool = False) -> None:
super().__init__(impl_bool_1)
self.how = "left"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class RightNaturalJoinStrategy(NaturalJoinStrategy):
def __init__(self, impl_bool_1: bool = False) -> None:
super().__init__(impl_bool_1)
self.how = "right"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class OuterNaturalJoinStrategy(NaturalJoinStrategy):
def __init__(self, impl_bool_1: bool = False) -> None:
super().__init__(impl_bool_1)
self.how = "outer"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class JoinOnStrategy(CombineStrategy):
def __init__(self, column: str) -> None:
super().__init__()
self.on = column
self.how = "inner"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
return df_1.merge(df_2, on=self.on, how=self.how)
def get_code(self, df_1: str, df_2: str) -> str:
return f"""#Here we {self.how }join on {self.on} two Dataframes\n{df_1} = {df_1}.merge({df_2}, on = "{self.on}", how="{self.how}")\n"""
class LeftJoinOnStrategy(JoinOnStrategy):
def __init__(self, column: str) -> None:
super().__init__(column)
self.how = "left"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class RightJoinOnStrategy(JoinOnStrategy):
def __init__(self, column: str) -> None:
super().__init__(column)
self.how = "right"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class OuterJoinOnStrategy(JoinOnStrategy):
def __init__(self, column: str) -> None:
super().__init__(column)
self.how = "outer"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class EquiJoinStrategy(CombineStrategy):
def __init__(self, left_col: str, right_col: str) -> None:
self.how = "inner"
self.right = right_col
self.left = left_col
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
return df_1.merge(df_2, left_on=self.left, right_on=self.right, how=self.how)
def get_code(self, df_1: str, df_2: str) -> str:
return f"""#Here we make an equi join two Dataframes\n{df_1} = {df_1}.merge({df_2},left_on="{self.left}", right_on="{self.right}", how="{self.how}")\n"""
class LeftEquiJoinStrategy(EquiJoinStrategy):
def __init__(self, left_col: str, right_col: str) -> None:
super().__init__(left_col, right_col)
self.how = "left"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class RightEquiJoinStrategy(EquiJoinStrategy):
def __init__(self, left_col: str, right_col: str) -> None:
super().__init__(left_col, right_col)
self.how = "right"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class OuterEquiJoinStrategy(EquiJoinStrategy):
def __init__(self, left_col: str, right_col: str) -> None:
super().__init__(left_col, right_col)
self.how = "outer"
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = super().combine(df_1, df_2)
        return df_1.replace({np.nan: None})
def get_code(self, df_1: str, df_2: str) -> str:
return super().get_code(df_1, df_2) + f"\n{df_1}.replace({{np.nan: None}})"
class ThetaStrategy(CombineStrategy):
def __init__(self, query: str) -> None:
super().__init__()
self.query = query
self.filter = SelectRowsStrategy(query)
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_1 = df_1.merge(df_2, how="cross")
return self.filter.transform(df_1)
def get_code(self, df_1: str, df_2: str) -> str:
return f"""#Here we do a theta join on {self.query} by crossing and then querying two Dataframes\n{df_1} = {df_1}.merge({df_2}, how = "cross")\n{self.filter.get_code(df_1)}"""
# set things
class ConcatStrategy(CombineStrategy):
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_c = pd.concat([df_1, df_2])
return df_c
def get_code(self, df_1: str, df_2: str) -> str:
return f"#Here we concate two Dataframes\n{df_1} = pd.concat([{df_1},{df_2}])\n"
class UnionStrategy(CombineStrategy):
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
df_c = pd.concat([df_1, df_2]).drop_duplicates(keep="first")
return df_c
def get_code(self, df_1: str, df_2: str) -> str:
return f"#Here we concate two Dataframes\n{df_1} = pd.concat([{df_1},{df_2}]).drop_duplicates(keep='first')\n"
class Intersecttrategy(CombineStrategy):
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
return df_1.merge(df_2, on=list(df_1.columns), how="inner")
def get_code(self, df_1: str, df_2: str) -> str:
return f"""#Here we cross two Dataframes\n{df_1} = {df_1}.merge({df_2}, on = list({df_1}.columns) , how = "inner")\n"""
class DiffrenceStrategy(CombineStrategy):
def combine(self, df_1: pd.DataFrame, df_2: pd.DataFrame) -> pd.DataFrame:
return | pd.concat([df_1, df_2]) | pandas.concat |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from trueskill import Rating, rate
sns.set()
sns.set_style('white')
def get_cumulative_ranks_df(games, rolling_average_n=None):
results = []
rank_map = dict()
for game_number, game in enumerate(games):
# Update rankings
for rank, player in enumerate(game.ranking):
if player not in rank_map:
rank_map[player] = []
normalized_rank = rank / (len(game.ranking) - 1)
rank_map[player].append(normalized_rank)
for player, ranks in rank_map.items():
discounted_ranks = ranks if rolling_average_n is None else ranks[-rolling_average_n:]
result = {
'Game Number': game_number,
'Player': player,
'Average Normalized Rank': sum(discounted_ranks) / len(discounted_ranks),
}
results.append(result)
return pd.DataFrame(results)
def plot_cumulative_ranks(games, rolling_average_n=None):
df = get_cumulative_ranks_df(games, rolling_average_n=rolling_average_n)
sns.lineplot(x='Game Number', y='Average Normalized Rank', hue='Player', data=df)
plt.show()
def get_trueskill_df(games):
df_results = []
trueskill_ratings = dict()
for game_number, game in enumerate(games):
for player in game.ranking:
if player not in trueskill_ratings:
trueskill_ratings[player] = Rating()
team_ratings = [[trueskill_ratings[player]] for player in game.ranking]
team_ranks = list(range(len(game.ranking)))
updated_team_ratings = rate(team_ratings, ranks=team_ranks)
updated_ratings = dict(zip(game.ranking, [r[0] for r in updated_team_ratings]))
trueskill_ratings.update(updated_ratings)
for player, trueskill_rating in trueskill_ratings.items():
df_result = {
'Game Number': game_number,
'Player': player,
'TrueSkill': trueskill_rating.mu,
}
df_results.append(df_result)
return pd.DataFrame(df_results)
def plot_trueskill(games):
df = get_trueskill_df(games)
sns.lineplot(x='Game Number', y='TrueSkill', hue='Player', data=df)
plt.show()
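# Note: the helpers in this module only rely on each game exposing a `ranking`
# attribute -- a list of player names ordered from first to last place. A
# lightweight stand-in for experimenting (purely illustrative):
#
#   from collections import namedtuple
#   Game = namedtuple("Game", "ranking")
#   games = [Game(ranking=["alice", "bob", "carol"]),
#            Game(ranking=["bob", "carol", "alice"])]
#   plot_trueskill(games)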
def _get_1v1_encoded_df(games):
players = list(set([player for game in games for player in game.ranking]))
results = []
for game in games:
for winner_i in range(len(game.ranking)):
for loser_i in range(winner_i + 1, len(game.ranking)):
winner = game.ranking[winner_i]
loser = game.ranking[loser_i]
result = []
for player in players:
if player == winner:
result.append(1)
elif player == loser:
result.append(-1)
else:
result.append(0)
results.append(result)
return | pd.DataFrame(results, columns=players) | pandas.DataFrame |
# Copyright 2018 <NAME>. All rights reserved.
#
# Licensed under the MIT license
"""
Script for panels of Figure S5 (Comparison with and structure of C elegans network)
"""
import core as c
import analysis as a
from global_defs import GlobalDefs
import os
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as pl
import numpy as np
import h5py
from mo_types import MoTypes
from Figure4 import mpath
from scipy.signal import convolve
from pandas import DataFrame
import pickle
# file definitions
base_path_zf = "./model_data/Adam_1e-4/sepInput_mixTrain/"
paths_512_zf = [f + '/' for f in os.listdir(base_path_zf) if "_3m512_" in f]
base_path_ce = "./model_data/CE_Adam_1e-4/"
paths_512_ce = [f + '/' for f in os.listdir(base_path_ce) if "_3m512_" in f]
if __name__ == "__main__":
save_folder = "./DataFigures/FigureS5/"
if not os.path.exists(save_folder):
os.makedirs(save_folder)
sns.reset_orig()
mpl.rcParams['pdf.fonttype'] = 42
std_zf = c.GradientData.load_standards("gd_training_data.hdf5")
ana_zf = a.Analyzer(MoTypes(False), std_zf, "sim_store.hdf5", "activity_store.hdf5")
std_ce = c.GradientData.load_standards("ce_gd_training_data.hdf5")
ana_ce = a.Analyzer(MoTypes(True), std_ce, "ce_sim_store.hdf5", "ce_activity_store.hdf5")
# load activity clusters from file
clfile = h5py.File("cluster_info.hdf5", "r")
clust_ids_zf = np.array(clfile["clust_ids"])
clfile.close()
clfile = h5py.File("ce_cluster_info.hdf5", "r")
clust_ids_ce = np.array(clfile["clust_ids"])
clfile.close()
# load and interpolate temperature stimulus
dfile = h5py.File("stimFile.hdf5", 'r')
tsin = np.array(dfile['sine_L_H_temp'])
x = np.arange(tsin.size) # stored at 20 Hz !
xinterp = np.linspace(0, tsin.size, tsin.size * GlobalDefs.frame_rate // 20)
temperature = np.interp(xinterp, x, tsin)
dfile.close()
# get activity data
all_ids_zf = []
all_cells_zf = []
for i, p in enumerate(paths_512_zf):
cell_res, ids = ana_zf.temperature_activity(mpath(base_path_zf, p), temperature, i)
all_ids_zf.append(ids)
all_cells_zf.append(cell_res)
all_ids_zf = np.hstack(all_ids_zf)
all_cells_zf = np.hstack(all_cells_zf)
all_ids_ce = []
all_cells_ce = []
for i, p in enumerate(paths_512_ce):
cell_res, ids = ana_ce.temperature_activity(mpath(base_path_ce, p), temperature, i)
all_ids_ce.append(ids)
all_cells_ce.append(cell_res)
all_ids_ce = np.hstack(all_ids_ce)
all_cells_ce = np.hstack(all_cells_ce)
# convolve activity with nuclear gcamp calcium kernel
tau_on = 1.4 # seconds
tau_on *= GlobalDefs.frame_rate # in frames
tau_off = 2 # seconds
tau_off *= GlobalDefs.frame_rate # in frames
kframes = np.arange(10 * GlobalDefs.frame_rate) # 10 s long kernel
kernel = 2 ** (-kframes / tau_off) * (1 - 2 ** (-kframes / tau_on))
kernel = kernel / kernel.sum()
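    # The kernel is a double exponential, 2^(-t/tau_off) * (1 - 2^(-t/tau_on)),
    # i.e. a fast rise followed by a slower decay, normalized to unit area so
    # that convolution preserves the overall scale of the activity traces.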
# convolve with our kernel
for i in range(all_cells_zf.shape[1]):
all_cells_zf[:, i] = convolve(all_cells_zf[:, i], kernel, mode='full')[:all_cells_zf.shape[0]]
for i in range(all_cells_ce.shape[1]):
all_cells_ce[:, i] = convolve(all_cells_ce[:, i], kernel, mode='full')[:all_cells_ce.shape[0]]
trial_time = np.arange(all_cells_zf.shape[0] // 3) / GlobalDefs.frame_rate
# plot colors
pal = sns.color_palette() # the default matplotlib color cycle
plot_cols_ce = {0: pal[0], 1: pal[3], 2: pal[2], 3: pal[4], 4: pal[5],
5: (0.6, 0.6, 0.6), 6: pal[6], 7: pal[1], -1: (0.6, 0.6, 0.6)}
step_min = 23
step_max = 27
temp_step = np.zeros(temperature.size // 3)
temp_step[:temp_step.size//5] = step_min
temp_step[temp_step.size*4//5:] = step_max
ramp = temp_step[temp_step.size//5:temp_step.size*4//5]
ramp = np.arange(ramp.size)/ramp.size*(step_max-step_min) + step_min
temp_step[temp_step.size//5:temp_step.size*4//5] = ramp
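    # The resulting stimulus holds 23 C for the first fifth of the trial,
    # ramps linearly from 23 C to 27 C over the middle three fifths, and then
    # holds 27 C for the final fifth.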
cells_ce_step = []
for i, p in enumerate(paths_512_ce):
cell_res, ids = ana_ce.temperature_activity(mpath(base_path_ce, p), temp_step, i)
cells_ce_step.append(cell_res)
cells_ce_step = np.hstack(cells_ce_step)
for i in range(cells_ce_step.shape[1]):
cells_ce_step[:, i] = convolve(cells_ce_step[:, i], kernel, mode='full')[:cells_ce_step.shape[0]]
# panel - all cluster activities, sorted into ON and OFF types
n_regs = np.unique(clust_ids_ce).size - 1
cluster_acts = np.zeros((cells_ce_step.shape[0], n_regs))
for i in range(n_regs):
cluster_acts[:, i] = np.mean(cells_ce_step[:, clust_ids_ce == i], 1)
on_count = 0
off_count = 0
fig, (axes_on, axes_off) = pl.subplots(ncols=2, nrows=2, sharey=True, sharex=True)
time = np.arange(cluster_acts.shape[0]) / GlobalDefs.frame_rate
for i in range(n_regs):
act = cluster_acts[:, i]
if np.corrcoef(act, temp_step[:act.size])[0, 1] < 0:
ax_off = axes_off[0] if off_count < 2 else axes_off[1]
ax_off.plot(time, cluster_acts[:, i], color=plot_cols_ce[i])
off_count += 1
else:
ax_on = axes_on[0] if on_count < 2 else axes_on[1]
ax_on.plot(time, cluster_acts[:, i], color=plot_cols_ce[i])
on_count += 1
axes_off[0].set_xticks([0, 30, 60, 90, 120, 150])
axes_off[1].set_xticks([0, 30, 60, 90, 120, 150])
axes_off[0].set_xlabel("Time [s]")
axes_off[1].set_xlabel("Time [s]")
axes_on[0].set_ylabel("Cluster average activation")
axes_off[0].set_ylabel("Cluster average activation")
sns.despine()
fig.tight_layout()
fig.savefig(save_folder + "ce_all_cluster_averages.pdf", type="pdf")
# panel - average type counts in temperature branch for each cluster
cl_type_d = {"Fraction": [], "net_id": [], "Cluster ID": [], "Layer": []}
    for i in range(len(paths_512_ce)):
for j in range(-1, n_regs):
for k in range(2):
lay_clust_ids = clust_ids_ce[np.logical_and(all_ids_ce[0, :] == i, all_ids_ce[1, :] == k)]
cl_type_d["Fraction"].append(np.sum(lay_clust_ids == j) / 512)
cl_type_d["net_id"].append(i)
cl_type_d["Cluster ID"].append(j)
cl_type_d["Layer"].append(k)
cl_type_df = DataFrame(cl_type_d)
fig, (ax_0, ax_1) = pl.subplots(nrows=2, sharex=True)
sns.barplot("Cluster ID", "Fraction", data=cl_type_df[cl_type_df["Layer"] == 0], order=list(range(n_regs)) + [-1],
ci=68, ax=ax_0, palette=plot_cols_ce)
sns.barplot("Cluster ID", "Fraction", data=cl_type_df[cl_type_df["Layer"] == 1], order=list(range(n_regs)) + [-1],
ci=68, ax=ax_1, palette=plot_cols_ce)
ax_0.set_yticks([0, 0.1, 0.2, 0.3, 0.4])
ax_1.set_yticks([0, 0.1, 0.2, 0.3, 0.4])
sns.despine(fig)
fig.savefig(save_folder + "ce_all_cluster_counts.pdf", type="pdf")
# panel - input connectivity into second layer of t branch
conn_mat = np.zeros((8, 8, len(paths_512_ce)))
for i, p in enumerate(paths_512_ce):
model_cids = clust_ids_ce[all_ids_ce[0, :] == i]
layers_ids = all_ids_ce[1, :][all_ids_ce[0, :] == i]
l_0_mask = np.full(8, False)
ix = model_cids[layers_ids == 0]
ix = ix[ix != -1]
l_0_mask[ix] = True
l_1_mask = np.full(8, False)
ix = model_cids[layers_ids == 1]
ix = ix[ix != -1]
l_1_mask[ix] = True
m_path = mpath(base_path_ce, p)
mdata = c.ModelData(m_path)
gpn = MoTypes(True).network_model()
gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
input_result = gpn.parse_layer_input_by_cluster('t', 1, model_cids[layers_ids == 0],
model_cids[layers_ids == 1])
for k, l0 in enumerate(np.arange(8)[l_0_mask]):
for l, l1 in enumerate(np.arange(8)[l_1_mask]):
conn_mat[l0, l1, i] = input_result[k, l]
# reordered version of conn_mat based on known types
cm_order = [1, 7, 0, 2, 3, 4, 5, 6]
cm_reordered = conn_mat[:, cm_order, :]
cm_reordered = cm_reordered[cm_order, :, :]
m = np.mean(cm_reordered, 2)
s = np.std(cm_reordered, 2)
cross_0 = np.sign((m+s) * (m-s)) <= 0
m[cross_0] = 0
s[cross_0] = 0
fig, axes = pl.subplots(nrows=4, sharex=True, sharey=True)
for i in range(4):
axes[i].bar(np.arange(8), m[:, i], width=[.8]*4+[.3]*4)
axes[i].errorbar(np.arange(8), m[:, i], s[:, i], color='k', fmt='none')
axes[-1].set_xticks(np.arange(8))
axes[-1].set_xticklabels(["AFD", "AWC/AIY", "0", "2", "3", "4", "5", "6"])
sns.despine(fig)
fig.tight_layout()
fig.savefig(save_folder + "ce_avg_connectivity_weights.pdf", type="pdf")
# Panel 9: Robustness of C elegans network to random deletions
percent_to_remove = [25, 50, 75, 85, 90, 95, 97, 99]
rem_d = {"state": [], "values": [], "species": []}
# store the random removal drop-lists to disk so that we can quickly re-make this panel from
# stored simulation results - as these depend on the drop-list they would never be loaded
# if drop-lists are randomized every time
dlist_file = h5py.File("drop_lists.hdf5")
for i, p in enumerate(paths_512_ce):
mp = mpath(base_path_ce, p)
pos = ana_ce.run_simulation(mp, "r", "naive")
rem_d["values"].append(a.preferred_fraction(pos, "r", 1.0))
rem_d["state"].append("naive")
rem_d["species"].append("C elegans")
pos = ana_ce.run_simulation(mp, "r", "trained")
rem_d["values"].append(a.preferred_fraction(pos, "r", 1.0))
rem_d["state"].append("trained")
rem_d["species"].append("C elegans")
for ptr in percent_to_remove:
file_key = mp + "_{0}".format(ptr)
rem_d["state"].append("{0} %".format(ptr))
rem_d["species"].append("C elegans")
rand_clusts = np.zeros(all_ids_ce.shape[1])
nw_units = rand_clusts.size // (len(paths_512_ce) * 2)
if file_key in dlist_file:
dlist = pickle.loads(np.array(dlist_file[file_key]))
else:
for j in range(len(paths_512_ce) * 2):
rand_clusts[j * nw_units:j * nw_units + int(nw_units * ptr / 100)] = 1
dlist = a.create_det_drop_list(i, rand_clusts, all_ids_ce, [1], True)
dlist_file.create_dataset(file_key, data=np.void(pickle.dumps(dlist, pickle.HIGHEST_PROTOCOL)))
pos = ana_ce.run_simulation(mp, "r", "trained", drop_list=dlist)
rem_d["values"].append(a.preferred_fraction(pos, "r", 1.0))
    dlist_file.close()
rem_d = | DataFrame(rem_d) | pandas.DataFrame |
# %% Imports
import os
import glob
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import seaborn as sns
from sklearn.linear_model import LinearRegression
from scipy.optimize import least_squares
from ruamel_yaml import YAML
original_np_seterr = np.seterr(all='raise') # raise exceptions rather than warnings
# %% The initial set of countries that we're interested in
countries = ["Australia", "Austria", "Belgium", "Brazil", "Canada", "Chile", "China", "Czechia", "Denmark", "Ecuador", "Finland", "France", "Germany", "Greece", "Iceland", "Indonesia", "Iran", "Ireland", "Israel", "Italy", "Japan", "Luxembourg", "Malaysia", "Netherlands", "Norway", "Pakistan", "Poland", "Portugal", "Saudi Arabia", "South Korea", "Spain", "Sweden", "Switzerland", "Thailand", "Turkey", "United States", "United Kingdom"]
use_all_countries = False # if set to 'True', reset 'countries' to all countries found in the data, after the data is loaded
us_states = ["California", "New York", "Texas", "Arizona", "Florida", "Washington"]; # cherrypick these states from the NY Times US Data Set
ca_provinces = ["Ontario", "Quebec"] # cherrypick these provinces from the JHU Data Set
statuses = ['confirmed', 'deaths'] # leave out 'recovered' for now since they are less informative and make the plots confusing
# %% Load the data from the external repository
ts_global = {
'confirmed': pd.read_csv("https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", header=0, index_col=1),
'deaths': pd.read_csv("https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv", header=0, index_col=1),
'recovered': | pd.read_csv("https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv", header=0, index_col=1) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on 2018-09-13
@author: <NAME>
"""
import numpy as np
import pandas as pd
CURRENT_ROUND = 38
# Load data from all 2018 rounds
# Data from https://github.com/henriquepgomide/caRtola
rounds = []
rounds.append(pd.read_csv('2018/data/rodada-1.csv'))
rounds.append(pd.read_csv('2018/data/rodada-2.csv'))
rounds.append(pd.read_csv('2018/data/rodada-3.csv'))
rounds.append(pd.read_csv('2018/data/rodada-4.csv'))
rounds.append(pd.read_csv('2018/data/rodada-5.csv'))
rounds.append(pd.read_csv('2018/data/rodada-6.csv'))
rounds.append(pd.read_csv('2018/data/rodada-7.csv'))
rounds.append(pd.read_csv('2018/data/rodada-8.csv'))
rounds.append(pd.read_csv('2018/data/rodada-9.csv'))
rounds.append(pd.read_csv('2018/data/rodada-10.csv'))
rounds.append(pd.read_csv('2018/data/rodada-11.csv'))
rounds.append(pd.read_csv('2018/data/rodada-12.csv'))
rounds.append(pd.read_csv('2018/data/rodada-13.csv'))
rounds.append(pd.read_csv('2018/data/rodada-14.csv'))
rounds.append(pd.read_csv('2018/data/rodada-15.csv'))
rounds.append(pd.read_csv('2018/data/rodada-16.csv'))
rounds.append(pd.read_csv('2018/data/rodada-17.csv'))
rounds.append(pd.read_csv('2018/data/rodada-18.csv'))
rounds.append(pd.read_csv('2018/data/rodada-19.csv'))
rounds.append(pd.read_csv('2018/data/rodada-20.csv'))
rounds.append(pd.read_csv('2018/data/rodada-21.csv'))
rounds.append(pd.read_csv('2018/data/rodada-22.csv'))
rounds.append(pd.read_csv('2018/data/rodada-23.csv'))
rounds.append( | pd.read_csv('2018/data/rodada-24.csv') | pandas.read_csv |
import pandas as pd
import altair as alt
from typing import List
from gettext import NullTranslations
def calculate_positive_tests_ratio(
df: pd.DataFrame, lang: NullTranslations
) -> pd.DataFrame:
"""
    Calculates a new column with the ratio of new positive cases to tests performed.
"""
_ = lang.gettext
daily_tests_df = diff_over_previous_datapoint(df, "data", _("casi_testati"))
daily_tests_df[_("positivi_per_tampone_%")] = (
daily_tests_df[_("nuovi_positivi")] / daily_tests_df[_("casi_testati")] * 100
)
return daily_tests_df
def get_data() -> pd.DataFrame:
"""
Gets data from the GitHub repository of the Protezione Civile
"""
data = pd.read_csv(
"https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
)
# Remove the time and just focus on the date
filtered_date_column = data["data"][data["data"].str.len() == 19]
data["data"] = (
pd.to_datetime(filtered_date_column).apply(lambda x: x.date()).dropna()
)
return data
def get_province_data() -> pd.DataFrame:
"""Gets data from the GitHub repository of the Protezione Civile regarding provinces"""
data = pd.read_csv(
"https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
)
# Remove the time and just focus on the date
filtered_date_column = data["data"][data["data"].str.len() == 19]
data["data"] = (
pd.to_datetime(filtered_date_column).apply(lambda x: x.date()).dropna()
)
return data
def get_features(data: pd.DataFrame) -> List[str]:
"""
Gets features from data, i.e. all columns except data, stato, codice_regione, denominazione_regione, lat, long
"""
feature_columns = [
column
for column in data.columns
if column
not in [
"data",
"stato",
"codice_regione",
"denominazione_regione",
"lat",
"long",
"note",
]
]
return feature_columns
def get_features_provinces(data: pd.DataFrame) -> List[str]:
"""
Gets features from data, i.e. all columns except data, stato, codice_regione, denominazione_regione, lat, long
"""
columns = set(data.columns.tolist())
features = columns.difference(
[
"data",
"stato",
"codice_regione",
"denominazione_regione",
"lat",
"long",
"note",
"sigla_provincia",
"denominazione_provincia",
"codice_provincia",
]
)
return list(features)
def formatter(name: str) -> str:
if name == "hospitalised_in_ICU":
return "Hospitalised in ICU"
else:
return " ".join(name.capitalize().split("_"))
def dataframe_translator(data: pd.DataFrame, lang: NullTranslations) -> pd.DataFrame:
"""
Translates original column features into language defined
"""
_ = lang.gettext
feature_mapping = {
"ricoverati_con_sintomi": _("ricoverati_con_sintomi"),
"terapia_intensiva": _("terapia_intensiva"),
"totale_ospedalizzati": _("totale_ospedalizzati"),
"isolamento_domiciliare": _("isolamento_domiciliare"),
"totale_positivi": _("totale_positivi"),
"variazione_totale_positivi": _("variazione_totale_positivi"),
"nuovi_positivi": _("nuovi_positivi"),
"dimessi_guariti": _("dimessi_guariti"),
"deceduti": _("deceduti"),
"casi_da_sospetto_diagnostico": _("casi_da_sospetto_diagnostico"),
"casi_da_screening": _("casi_da_screening"),
"totale_casi": _("totale_casi"),
"tamponi": _("tamponi"),
"casi_testati": _("casi_testati"),
"positivi_per_tampone_%": _("positivi_per_tampone_%"),
}
data.columns = [
feature_mapping[feature] if feature in feature_mapping else feature
for feature in data.columns
]
return data
def calculate_growth_factor(
data: pd.DataFrame, features: List[str], prefix: str = "growth_factor"
) -> pd.DataFrame:
for feature in features:
data[f"{feature}_yesterday"] = data[feature].shift()
data[f"{prefix}_{feature}"] = data[feature] / data[f"{feature}_yesterday"]
return data
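# The growth factor of a feature is simply today's value divided by
# yesterday's value (e.g. growth_factor_totale_casi = totale_casi /
# totale_casi_yesterday); values above 1 mean the quantity is still growing
# from one day to the next.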
def regional_growth_factor(
data: pd.DataFrame, features: List[str], prefix: str = "growth_factor"
) -> pd.DataFrame:
regions_raw = []
for region_name, region in data.groupby("denominazione_regione"):
region = region.sort_values("data")
region = calculate_growth_factor(region, features, prefix=prefix)
regions_raw.append(region)
data = | pd.concat(regions_raw) | pandas.concat |
#%load_ext autoreload
#%autoreload 2
import dataclasses
import glob
import logging
import os
import shutil
import warnings
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from scipy.sparse.csr import csr_matrix
from psykoda import detection, feature_extraction, preprocess, utils
from psykoda.constants import COMMANDLINE_DATE_FORMAT, col
from psykoda.io import labeled, reporting
logger = logging.getLogger(__name__)
to_stderr = {"_log_err": True}
FILENAME_WEIGHT = "best_weight.h5"
FILENAME_IDF_SID = "idf_sid.csv"
FILENAME_IDF_DPORT = "idf_dport.csv"
FILENAME_PLOT_DETECTION = "plot_detection.png"
FILENAME_STATS = "stats.json"
FILENAME_REPORT = "report.csv"
FILENAME_FEATURE_MATRIX = "feature_matrix.csv"
def configure_logging(debug: bool):
"""
Configure execution log settings.
Parameters
----------
debug
Whether to log "debug levels".
"""
PATH_LOG = "./log/log_" + datetime.strftime(datetime.today(), "%Y-%m-%d") + ".log"
os.makedirs(os.path.dirname(PATH_LOG), exist_ok=True)
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
# utilities
stderr_filter = lambda record: getattr(record, "_log_err", False)
# app config
stderr_handler = logging.StreamHandler()
stderr_handler.addFilter(stderr_filter)
stderr_handler.setLevel(logging.INFO)
stderr_handler.setFormatter(logging.Formatter("%(message)s"))
handlers = [stderr_handler]
logfile_handler = logging.FileHandler(PATH_LOG)
logfile_handler.setLevel(logging.DEBUG)
logfile_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)-8s [%(module)s # %(funcName)s line %(lineno)d] %(message)s"
)
)
handlers.append(logfile_handler)
logging.basicConfig(handlers=handlers, level=log_level)
class Incomplete_Args_Exception(Exception):
pass
load_config = utils.load_json
@dataclass
class OutputConfig:
dir: str
share_dir: Optional[str]
subdir: Optional[str]
@dataclass
class PreprocessConfig:
exclude_lists: Optional[str]
screening: preprocess.ScreeningConfig
@dataclass
class InputConfig:
dir: str
@dataclasses.dataclass
class LoadPreviousConfigItem:
list: Optional[str]
ndate: int = 730
@dataclasses.dataclass
class LoadPreviousConfig:
"""
Log loading settings.
Parameters
----------
list
path to CSV file in which labeled IP addresses are listed
ndate
time range for labeled IP addresses, in days
"""
known_normal: Optional[LoadPreviousConfigItem]
known_anomaly: Optional[LoadPreviousConfigItem]
unknown: Optional[LoadPreviousConfigItem]
@dataclass
class PreviousConfig:
load: LoadPreviousConfig
log: labeled.Config
@dataclass
class IOConfig:
input: InputConfig
previous: PreviousConfig
output: OutputConfig
@dataclass
class Service:
"""Service definition: set of destination port numbers
Examples
--------
>>> all = Service()
>>> ssh = Service(include=[22])
>>> all_but_ssh = Service(exclude=[22])
>>> ssh_or_https = Service(include=[22, 443])
"""
include: Optional[List[int]]
exclude: Optional[List[int]]
@dataclass
class Subnet:
"""Subnet configuration: set of CIDR-formatted IP addresses with services to analyze
Examples
--------
>>> private_A = Subnet(["10.0.0.0/8"], get_names_of_services_from_config())
>>> private = Subnet(["private-A", "private-B", "private-C"], get_names_of_services_from_config()) # these constants are available for convenience and readability
>>> my_network = Subnet(["10.0.0.0/16", "10.1.1.0/24"], get_names_of_services_from_config())
"""
cidrs: List[str]
services: List[str]
@dataclass
class DetectionUnitConfig:
"""Detection unit configuration
Parameters
----------
services
map from names of service to service definitions
subnets
map from names of subnet to subnet configurations
"""
services: Dict[str, Service]
subnets: Dict[str, Subnet]
@dataclass
class TargetPeriod:
days: int = 30
@dataclass
class ArgumentsConfig:
"""Arguments modification configuration
Parameters
----------
target_period:
default target period used to determine date_from and date_to values if missing.
"""
target_period: TargetPeriod
def set_default_date_detect(args, config: ArgumentsConfig):
"""
Configure training from/to dates according to args and config.
Parameters
----------
args
Command line args.
config
Settings for arguments.
Returns
-------
args
Command line args with training from/to dates added.
"""
date_time_today = datetime.today()
if args.date_from is None:
args.date_from = date_time_today - timedelta(config.target_period.days)
if args.date_to is None:
args.date_to = date_time_today - timedelta(1)
args.date_from_training = args.date_from - timedelta(args.period_train)
args.date_to_training = args.date_from - timedelta(1)
return args
@dataclass
class SkipDetectionConfig:
train: int
test: int
@dataclass
class ThresholdConfig:
num_anomaly: int
min_score: float
@dataclass
class AnomalyDetectionConfig:
required_srcip: SkipDetectionConfig
deepsad: detection.DeepSAD.Config
train: detection.DeepSAD.TrainConfig
threshold: ThresholdConfig
@dataclasses.dataclass
class DetectConfig:
arguments: ArgumentsConfig
detection_units: DetectionUnitConfig
io: IOConfig
preprocess: PreprocessConfig
feature_extraction: feature_extraction.FeatureExtractionConfig
anomaly_detection: AnomalyDetectionConfig
def main_detection(args, config: DetectConfig, log: pd.DataFrame, label: pd.Series):
"""
Parameters
----------
args
config
log
:index:
:columns:
label
filled with 1
:index:
"""
dir_report = os.path.join(config.io.output.subdir, args.subnet, args.service)
os.makedirs(dir_report, exist_ok=True)
feature_label = main_detection_prepare_data(
args, config.feature_extraction, log, label
)
if feature_label is None:
return
feature_label.idf_sid.to_csv(os.path.join(dir_report, FILENAME_IDF_SID))
feature_label.idf_dport.to_csv(os.path.join(dir_report, FILENAME_IDF_DPORT))
train_test_splitted, x_train_labeled = main_detection_after_prepare_data(
args, label, feature_label
)
stats = main_detection_skip_or_detect(
args,
log,
label,
dir_report,
feature_label,
train_test_splitted,
x_train_labeled,
anomaly_detection_config=config.anomaly_detection,
previous_config=config.io.previous.log,
)
utils.save_json(stats, path=os.path.join(dir_report, FILENAME_STATS))
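    # Reports, learned weights, IDF tables and stats for this run all land in
    # one directory per (subnet, service) pair: <output.subdir>/<subnet>/<service>/.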
def main_detection_prepare_data(
args,
config: feature_extraction.FeatureExtractionConfig,
log: pd.DataFrame,
label: pd.Series,
) -> Optional[feature_extraction.FeatureLabel]:
"""Feature extraction"""
logger.info("start detect on subnet %s and service %s", args.subnet, args.service)
if len(log) == 0:
logger.info("skip analysis; no logs exist")
return None
logger.info("extracting features")
feature_label = feature_extraction.feature_extraction_all(
log=log,
iptable=pd.read_csv(config.address_to_location),
idf_config=config.idf,
)
if feature_label is None:
logger.info("skip analysis; feature matrix is None")
return None
feature_label.extract_nonzeros()
label = label.loc[label.index & feature_label.index]
feature_label.put_labels(labeled_samples=label)
feature_label.feature = feature_label.feature / feature_label.feature.max()
return feature_label
def main_detection_after_prepare_data(
args, label: pd.Series, feature_label: feature_extraction.FeatureLabel
):
"""Split data and construct labeled training feature."""
train_test_splitted = feature_label.split_train_test(args.date_to_training)
idx_labeled = [
feature_label.index.index(sample)
for sample in label.index
if sample in feature_label.index
]
x_train_labeled = feature_label.feature[idx_labeled]
return train_test_splitted, x_train_labeled
def main_detection_skip_or_detect(
args,
log: pd.DataFrame,
label: pd.Series,
dir_report: str,
feature_label: feature_extraction.FeatureLabel,
train_test_splitted,
x_train_labeled: csr_matrix,
anomaly_detection_config: AnomalyDetectionConfig,
previous_config: labeled.Config,
) -> dict:
"""Anomaly detection and output the result."""
x_train, y_train, x_test, index_test = train_test_splitted
stats = {
"subnet": args.subnet,
"service": args.service,
"date_from": args.date_from,
"date_to": args.date_to,
"num_samples_st_detection": len(index_test),
"num_samples_training": len(y_train),
"date_from_training": args.date_from_training,
"date_to_training": args.date_to_training,
"num_samples_labeled": x_train_labeled.shape[0],
"samples_labeled": label.index.tolist(),
}
logger.info("stats: %s", stats)
if len(y_train) < anomaly_detection_config.required_srcip.train:
skip_message = f"#src_ip[train] = {len(y_train)} < config.anomaly_detection.required_srcip.train = {anomaly_detection_config.required_srcip.train}"
logger.info(skip_message)
stats["skipped"] = skip_message
return stats
if len(index_test) < anomaly_detection_config.required_srcip.test:
skip_message = f"#src_ip[test] = {len(index_test)} < config.anomaly_detection.required_srcip.test = {anomaly_detection_config.required_srcip.test}"
logger.info(skip_message)
stats["skipped"] = skip_message
return stats
logger.info("training detector")
verbose = 1 if logger.root.level < 20 else 0
detector = detection.DeepSAD(anomaly_detection_config.deepsad)
detector.train(
X=x_train,
y=y_train,
path_model=os.path.join(dir_report, FILENAME_WEIGHT),
config=anomaly_detection_config.train,
verbose=verbose,
)
logger.info("outputting detection reports")
anomaly_score = detector.compute_anomaly_score(x_test, scale=True)
num_anomaly = min(
sum(anomaly_score > anomaly_detection_config.threshold.min_score),
anomaly_detection_config.threshold.num_anomaly,
)
idx_sorted = np.argsort(anomaly_score)[::-1].tolist()
idx_anomaly = idx_sorted[:num_anomaly]
anomaly_score_sorted = pd.Series(
anomaly_score[idx_sorted],
index=pd.MultiIndex.from_tuples(
[index_test[i] for i in idx_sorted],
names=(col.DATETIME_ROUNDED, col.SRC_IP),
),
name="anomaly_score",
)
x_test_embeddings = detector.compute_embeddings(x_test)
x_train_labeled_embeddings = detector.compute_embeddings(x_train_labeled)
shap_value_idx_sorted = detector.explain_anomaly(
x_test[idx_anomaly], background_samples=x_train
)
shap_value_idx_sorted = pd.DataFrame(
shap_value_idx_sorted,
index=pd.MultiIndex.from_tuples(
[index_test[i] for i in idx_anomaly],
names=(col.DATETIME_ROUNDED, col.SRC_IP),
),
columns=feature_label.columns,
)
stats = output_result(
args,
log,
label,
dir_report,
x_train_labeled_embeddings=x_train_labeled_embeddings,
x_test_embeddings=x_test_embeddings,
idx_anomaly=idx_anomaly,
shap_value_idx_sorted=shap_value_idx_sorted,
anomaly_score_sorted=anomaly_score_sorted,
stats=stats,
previous_config=previous_config,
)
if args.debug:
if isinstance(x_test, csr_matrix):
x_test = x_test.toarray()
ret = pd.DataFrame(x_test, index=index_test, columns=feature_label.columns)
ret = ret.iloc[idx_sorted]
ret.to_csv(os.path.join(dir_report, FILENAME_FEATURE_MATRIX))
return stats
def output_result(
args,
log: pd.DataFrame,
label: pd.Series,
dir_report: str,
*,
x_train_labeled_embeddings,
x_test_embeddings,
idx_anomaly,
shap_value_idx_sorted,
anomaly_score_sorted,
stats: dict,
previous_config: labeled.Config,
):
"""Plot the detection result and output the report."""
reporting.plot.plot_detection(
X=x_test_embeddings,
idx_anomaly=idx_anomaly,
name_anomaly=shap_value_idx_sorted.index,
X_labeled=x_train_labeled_embeddings,
name_labeled=label.index,
path_saved=os.path.join(dir_report, FILENAME_PLOT_DETECTION),
no_plot=args.no_plot,
)
detection.detection_report(
anomaly_score_sorted,
shap_value_idx_sorted,
shap_top_k=5,
).to_csv(os.path.join(dir_report, FILENAME_REPORT))
labeled.factory(previous_config)[1].save_previous_log(
df=log,
entries=shap_value_idx_sorted.index,
)
stats["num_anomaly"] = len(idx_anomaly)
stats["name_anomaly"] = shap_value_idx_sorted.index.tolist()
logger.info(
"successfully finish detection on subnet %s and service %s\n",
args.subnet,
args.service,
)
return stats
def report_all(path_list_stats: List[str], path_save: str):
"""
Summarizing all reports and save it.
Parameters
----------
path_list_stats : list
List of stats file paths
path_save : str
File path where the report will be saved
"""
os.makedirs(os.path.dirname(path_save), exist_ok=True)
logger.info("summarizing all reports...")
results_pd = pd.DataFrame(
[], columns=["datetime_rounded", "src_ip", "subnet", "service"]
)
idx = 0
for path in path_list_stats:
# Load stats
stats = utils.load_json(path)
subnet, service = stats["subnet"], stats["service"]
try:
anomaly_list = stats["name_anomaly"]
except (KeyError, TypeError):
continue
if not anomaly_list:
continue
# Load report
path_report = path.replace(FILENAME_STATS, FILENAME_REPORT)
report = pd.read_csv(path_report, index_col=[0, 1], parse_dates=[0])
logger.info(report.index)
# Store anomalies in the DataFrame
for (dt, src_ip) in anomaly_list:
logger.info((dt, src_ip))
results_pd.loc[idx] = [dt, src_ip, subnet, service]
if idx == 0:
results_shaps = pd.DataFrame([], columns=report.columns)
results_shaps.loc[idx] = report.loc[(dt, src_ip)]
idx += 1
anomaly_found = idx > 0
if anomaly_found:
# Anomaly found
results_pd = pd.concat([results_pd, results_shaps], axis=1)
results_pd = results_pd.sort_values(
["anomaly_score", "datetime_rounded"], ascending=False
)
keys = results_pd["src_ip"].unique()
results_pd_group = results_pd.groupby("src_ip")
ret = pd.DataFrame([])
for key in keys:
ret = pd.concat([ret, results_pd_group.get_group(key)])
ret.round(4).to_csv(path_save, index=False)
else:
# Anomaly not found
| pd.DataFrame([["no anomaly found"]]) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
def load_arrests(return_X_y=False, give_pandas=False):
"""
Loads the arrests dataset which can serve as a benchmark for fairness. It is data on
the police treatment of individuals arrested in Toronto for simple possession of small
quantities of marijuana. The goal is to predict whether or not the arrestee was released
with a summons while maintaining a degree of fairness.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> from sklego.datasets import load_arrests
>>> X, y = load_arrests(return_X_y=True)
>>> X.shape
(5226, 7)
>>> y.shape
(5226,)
>>> load_arrests(give_pandas=True).columns
Index(['released', 'colour', 'year', 'age', 'sex', 'employed', 'citizen',
'checks'],
dtype='object')
The dataset was copied from the carData R package and can originally be found in:
- Personal communication from <NAME>, York University.
The documentation page of the dataset from the package can be viewed here:
http://vincentarelbundock.github.io/Rdatasets/doc/carData/Arrests.html
"""
filepath = resource_filename("sklego", os.path.join("data", "arrests.zip"))
df = pd.read_csv(filepath)
if give_pandas:
return df
X, y = (
df[["colour", "year", "age", "sex", "employed", "citizen", "checks"]].values,
df["released"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_chicken(return_X_y=False, give_pandas=False):
"""
Loads the chicken dataset. The chicken data has 578 rows and 4 columns
from an experiment on the effect of diet on early growth of chicks.
The body weights of the chicks were measured at birth and every second
day thereafter until day 20. They were also measured on day 21.
There were four groups on chicks on different protein diets.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> from sklego.datasets import load_chicken
>>> X, y = load_chicken(return_X_y=True)
>>> X.shape
(578, 3)
>>> y.shape
(578,)
>>> load_chicken(give_pandas=True).columns
Index(['weight', 'time', 'chick', 'diet'], dtype='object')
The datasets can be found in the following sources:
- Crowder, M. and <NAME>. (1990), Analysis of Repeated Measures, Chapman and Hall (example 5.3)
- Hand, D. and <NAME>. (1996), Practical Longitudinal Data Analysis, Chapman and Hall (table A.2)
"""
filepath = resource_filename("sklego", os.path.join("data", "chickweight.zip"))
df = pd.read_csv(filepath)
    if give_pandas:
        return df
X, y = df[["time", "diet", "chick"]].values, df["weight"].values
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_abalone(return_X_y=False, give_pandas=False):
"""
Loads the abalone dataset where the goal is to predict the gender of the creature.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> from sklego.datasets import load_abalone
>>> X, y = load_abalone(return_X_y=True)
>>> X.shape
(4177, 8)
>>> y.shape
(4177,)
>>> load_abalone(give_pandas=True).columns
Index(['sex', 'length', 'diameter', 'height', 'whole_weight', 'shucked_weight',
'viscera_weight', 'shell_weight', 'rings'],
dtype='object')
The dataset was copied from Kaggle and can originally be found in: can be found in the following sources:
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME> (1994)
"The Population Biology of Abalone (_Haliotis_ species) in Tasmania."
Sea Fisheries Division, Technical Report No. 48 (ISSN 1034-3288)
"""
filepath = resource_filename("sklego", os.path.join("data", "abalone.zip"))
df = pd.read_csv(filepath)
if give_pandas:
return df
X = df[
[
"length",
"diameter",
"height",
"whole_weight",
"shucked_weight",
"viscera_weight",
"shell_weight",
"rings",
]
].values
y = df["sex"].values
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_heroes(return_X_y=False, give_pandas=False):
"""
A dataset from a video game: "heroes of the storm". The goal of the dataset
is to predict the attack type. Note that the pandas dataset returns more information.
This is because we wanted to keep the X simple in the return_X_y case.
:param return_X_y: If True, returns ``(data, target)`` instead of a dict object.
:param give_pandas: give the pandas dataframe instead of X, y matrices (default=False)
:Example:
>>> X, y = load_heroes(return_X_y=True)
>>> X.shape
(84, 2)
>>> y.shape
(84,)
>>> df = load_heroes(give_pandas=True)
>>> df.columns
Index(['name', 'attack_type', 'role', 'health', 'attack', 'attack_spd'], dtype='object')
"""
filepath = resource_filename("sklego", os.path.join("data", "heroes.zip"))
df = pd.read_csv(filepath)
if give_pandas:
return df
X = df[["health", "attack"]].values
y = df["attack_type"].values
if return_X_y:
return X, y
return {"data": X, "target": y}
def make_simpleseries(
n_samples=365 * 5,
trend=0.001,
season_trend=0.001,
noise=0.5,
give_pandas=False,
seed=None,
stack_noise=False,
start_date=None,
):
"""
Generate a very simple timeseries dataset to play with. The generator
assumes to generate daily data with a season, trend and noise.
:param n_samples: The number of days to simulate the timeseries for.
:param trend: The long term trend in the dataset.
:param season_trend: The long term trend in the seasonality.
:param noise: The noise that is applied to the dataset.
:param give_pandas: Return a pandas dataframe instead of a numpy array.
:param seed: The seed value for the randomness.
:param stack_noise: Set the noise to be stacked by a cumulative sum.
:param start_date: Also add a start date (only works if `give_pandas`=True).
:return: numpy array unless dataframe is specified
:Example:
>>> from sklego.datasets import make_simpleseries
>>> make_simpleseries(seed=42)
array([-0.34078806, -0.61828731, -0.18458236, ..., -0.27547402,
-0.38237413, 0.13489355])
>>> make_simpleseries(give_pandas=True, start_date="2018-01-01", seed=42).head(3)
yt date
0 -0.340788 2018-01-01
1 -0.618287 2018-01-02
2 -0.184582 2018-01-03
"""
if seed:
np.random.seed(seed)
time = np.arange(0, n_samples)
noise = np.random.normal(0, noise, n_samples)
if stack_noise:
noise = noise.cumsum()
r1, r2 = np.random.normal(0, 1, 2)
seasonality = r1 * np.sin(time / 365 * 2 * np.pi) + r2 * np.cos(
time / 365 * 4 * np.pi + 1
)
result = seasonality + season_trend * seasonality * time + trend * time + noise
if give_pandas:
if start_date:
stamps = pd.date_range(start_date, periods=n_samples)
return pd.DataFrame({"yt": result, "date": stamps})
return | pd.DataFrame({"yt": result}) | pandas.DataFrame |
from copy import copy, deepcopy
from textwrap import dedent
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import pandas as pd
from xray import (align, concat, conventions, backends, Dataset, DataArray,
Variable, Coordinate)
from xray.core import indexing, utils
from xray.core.pycompat import iteritems, OrderedDict
from . import TestCase, unittest
def create_test_data(seed=None):
rs = np.random.RandomState(seed)
_vars = {'var1': ['dim1', 'dim2'],
'var2': ['dim1', 'dim2'],
'var3': ['dim3', 'dim1']}
_dims = {'dim1': 8, 'dim2': 9, 'dim3': 10}
obj = Dataset()
obj['time'] = ('time', pd.date_range('2000-01-01', periods=20))
obj['dim1'] = ('dim1', np.arange(_dims['dim1']))
obj['dim2'] = ('dim2', 0.5 * np.arange(_dims['dim2']))
obj['dim3'] = ('dim3', list('abcdefghij'))
for v, dims in sorted(_vars.items()):
data = rs.normal(size=tuple(_dims[d] for d in dims))
obj[v] = (dims, data, {'foo': 'variable'})
obj.coords['numbers'] = ('dim3', [0, 1, 2, 0, 0, 1, 1, 2, 2, 3])
return obj
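# create_test_data builds a small synthetic Dataset used as a fixture below:
# a 20-step daily 'time' coordinate, dimensions dim1=8, dim2=9 and dim3=10,
# three random data variables and a 'numbers' coordinate along dim3.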
class UnexpectedDataAccess(Exception):
pass
class InaccessibleArray(utils.NDArrayMixin):
def __init__(self, array):
self.array = array
def __getitem__(self, key):
raise UnexpectedDataAccess("Tried accessing data")
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
def get_variables(self):
def lazy_inaccessible(x):
data = indexing.LazilyIndexedArray(InaccessibleArray(x.values))
return Variable(x.dims, data, x.attrs)
return dict((k, lazy_inaccessible(v)) for
k, v in iteritems(self._variables))
class TestDataset(TestCase):
def test_repr(self):
data = create_test_data(seed=123)
data.attrs['foo'] = 'bar'
# need to insert str dtype at runtime to handle both Python 2 & 3
expected = dedent("""\
<xray.Dataset>
Dimensions: (dim1: 8, dim2: 9, dim3: 10, time: 20)
Coordinates:
* dim1 (dim1) int64 0 1 2 3 4 5 6 7
* dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
* dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 2000-01-04 ...
numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
Data variables:
var1 (dim1, dim2) float64 -1.086 0.9973 0.283 -1.506 -0.5786 1.651 -2.427 -0.4289 ...
var2 (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 -0.8375 -1.606 ...
var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 0.2538 ...
Attributes:
foo: bar""") % data['dim3'].dtype
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
*empty*""")
actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify that ... doesn't appear for scalar coordinates
data = Dataset({'foo': ('x', np.ones(10))}).mean()
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
foo float64 1.0""")
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify long attributes are truncated
data = Dataset(attrs={'foo': 'bar' * 1000})
self.assertTrue(len(repr(data)) < 1000)
def test_constructor(self):
x1 = ('x', 2 * np.arange(100))
x2 = ('x', np.arange(1000))
z = (['x', 'y'], np.arange(1000).reshape(100, 10))
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': x1, 'b': x2})
with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
Dataset({'a': x1, 'x': z})
# verify handling of DataArrays
expected = Dataset({'x': x1, 'z': z})
actual = Dataset({'z': expected['z']})
self.assertDatasetIdentical(expected, actual)
def test_constructor_1d(self):
expected = Dataset({'x': (['x'], 5.0 + np.arange(5))})
actual = Dataset({'x': 5.0 + np.arange(5)})
self.assertDatasetIdentical(expected, actual)
actual = Dataset({'x': [5, 6, 7, 8, 9]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_0d(self):
expected = Dataset({'x': ([], 1)})
for arg in [1, np.array(1), expected['x']]:
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
d = pd.Timestamp('2000-01-01T12')
args = [True, None, 3.4, np.nan, 'hello', u'uni', b'raw',
np.datetime64('2000-01-01T00'), d, d.to_datetime()]
for arg in args:
print(arg)
expected = Dataset({'x': ([], arg)})
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
def test_constructor_auto_align(self):
a = DataArray([1, 2], [('x', [0, 1])])
b = DataArray([3, 4], [('x', [1, 2])])
# verify align uses outer join
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
actual = Dataset({'a': a, 'b': b})
self.assertDatasetIdentical(expected, actual)
# regression test for GH346
self.assertIsInstance(actual.variables['x'], Coordinate)
# variable with different dimensions
c = ('y', [3, 4])
expected2 = expected.merge({'c': c})
actual = Dataset({'a': a, 'b': b, 'c': c})
self.assertDatasetIdentical(expected2, actual)
# variable that is only aligned against the aligned variables
d = ('x', [3, 2, 1])
expected3 = expected.merge({'d': d})
actual = Dataset({'a': a, 'b': b, 'd': d})
self.assertDatasetIdentical(expected3, actual)
e = ('x', [0, 0])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': a, 'b': b, 'e': e})
def test_constructor_compat(self):
data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
('y', ('z', [1, 1, 1]))])
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data, compat='equals')
expected = Dataset({'x': 0}, {'y': ('z', [1, 1, 1])})
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
actual = Dataset(data, compat='broadcast_equals')
self.assertDatasetIdentical(expected, actual)
data = OrderedDict([('y', ('z', [1, 1, 1])),
('x', DataArray(0, coords={'y': 1}))])
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
original = Dataset({'a': (('x', 'y'), np.ones((2, 3)))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
expected = Dataset({'a': ('x', np.ones(2)),
'b': ('y', np.ones(3))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
# use an OrderedDict to ensure test results are reproducible; otherwise
# the order of appearance of x and y matters for the order of
# dimensions in 'c'
actual = Dataset(OrderedDict([('a', original['a'][:, 0].drop('y')),
('b', original['a'][0].drop('x'))]))
self.assertDatasetIdentical(expected, actual)
data = {'x': DataArray(0, coords={'y': 3}), 'y': ('z', [1, 1, 1])}
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data)
def test_constructor_with_coords(self):
with self.assertRaisesRegexp(ValueError, 'redundant variables and co'):
Dataset({'a': ('x', [1])}, {'a': ('x', [1])})
ds = Dataset({}, {'a': ('x', [1])})
self.assertFalse(ds.data_vars)
self.assertItemsEqual(ds.coords.keys(), ['x', 'a'])
def test_properties(self):
ds = create_test_data()
self.assertEqual(ds.dims,
{'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20})
self.assertItemsEqual(ds, list(ds.variables))
self.assertItemsEqual(ds.keys(), list(ds.variables))
self.assertEqual(len(ds), 8)
self.assertItemsEqual(ds.data_vars, ['var1', 'var2', 'var3'])
self.assertItemsEqual(ds.data_vars.keys(), ['var1', 'var2', 'var3'])
self.assertIn('var1', ds.data_vars)
self.assertNotIn('dim1', ds.data_vars)
self.assertNotIn('numbers', ds.data_vars)
self.assertEqual(len(ds.data_vars), 3)
self.assertItemsEqual(ds.indexes, ['dim1', 'dim2', 'dim3', 'time'])
self.assertEqual(len(ds.indexes), 4)
self.assertItemsEqual(ds.coords,
['time', 'dim1', 'dim2', 'dim3', 'numbers'])
self.assertIn('dim1', ds.coords)
self.assertIn('numbers', ds.coords)
self.assertNotIn('var1', ds.coords)
self.assertEqual(len(ds.coords), 5)
def test_attr_access(self):
ds = Dataset({'tmin': ('x', [42], {'units': 'Celcius'})},
attrs={'title': 'My test data'})
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
self.assertDataArrayIdentical(ds.tmin.x, ds.x)
self.assertEqual(ds.title, ds.attrs['title'])
self.assertEqual(ds.tmin.units, ds['tmin'].attrs['units'])
self.assertLessEqual(set(['tmin', 'title']), set(dir(ds)))
self.assertIn('units', set(dir(ds.tmin)))
# should defer to variable of same name
ds.attrs['tmin'] = -999
self.assertEqual(ds.attrs['tmin'], -999)
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
def test_variable(self):
a = Dataset()
d = np.random.random((10, 3))
a['foo'] = (('time', 'x',), d)
self.assertTrue('foo' in a.variables)
self.assertTrue('foo' in a)
a['bar'] = (('time', 'x',), d)
# order of creation is preserved
self.assertEqual(list(a), ['foo', 'time', 'x', 'bar'])
self.assertTrue(all([a['foo'][i].values == d[i]
for i in np.ndindex(*d.shape)]))
# try to add variable with dim (10,3) with data that's (3,10)
with self.assertRaises(ValueError):
a['qux'] = (('time', 'x'), d.T)
def test_modify_inplace(self):
a = Dataset()
vec = np.random.random((10,))
attributes = {'foo': 'bar'}
a['x'] = ('x', vec, attributes)
self.assertTrue('x' in a.coords)
self.assertIsInstance(a.coords['x'].to_index(),
pd.Index)
self.assertVariableIdentical(a.coords['x'], a.variables['x'])
b = Dataset()
b['x'] = ('x', vec, attributes)
self.assertVariableIdentical(a['x'], b['x'])
self.assertEqual(a.dims, b.dims)
# this should work
a['x'] = ('x', vec[:5])
a['z'] = ('x', np.arange(5))
with self.assertRaises(ValueError):
# now it shouldn't, since there is a conflicting length
a['x'] = ('x', vec[:4])
arr = np.random.random((10, 1,))
scal = np.array(0)
with self.assertRaises(ValueError):
a['y'] = ('y', arr)
with self.assertRaises(ValueError):
a['y'] = ('y', scal)
self.assertTrue('y' not in a.dims)
def test_coords_properties(self):
# use an OrderedDict for coordinates to ensure order across python
# versions
# use int64 for repr consistency on windows
data = Dataset(OrderedDict([('x', ('x', np.array([-1, -2], 'int64'))),
('y', ('y', np.array([0, 1, 2], 'int64'))),
('foo', (['x', 'y'],
np.random.randn(2, 3)))]),
OrderedDict([('a', ('x', np.array([4, 5], 'int64'))),
('b', np.int64(-10))]))
self.assertEqual(4, len(data.coords))
self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))
self.assertVariableIdentical(data.coords['x'], data['x'].variable)
self.assertVariableIdentical(data.coords['y'], data['y'].variable)
self.assertIn('x', data.coords)
self.assertIn('a', data.coords)
self.assertNotIn(0, data.coords)
self.assertNotIn('foo', data.coords)
with self.assertRaises(KeyError):
data.coords['foo']
with self.assertRaises(KeyError):
data.coords[0]
expected = dedent("""\
Coordinates:
* x (x) int64 -1 -2
* y (y) int64 0 1 2
a (x) int64 4 5
b int64 -10""")
actual = repr(data.coords)
self.assertEqual(expected, actual)
self.assertEqual({'x': 2, 'y': 3}, data.coords.dims)
def test_coords_modify(self):
data = Dataset({'x': ('x', [-1, -2]),
'y': ('y', [0, 1, 2]),
'foo': (['x', 'y'], np.random.randn(2, 3))},
{'a': ('x', [4, 5]), 'b': -10})
actual = data.copy(deep=True)
actual.coords['x'] = ('x', ['a', 'b'])
self.assertArrayEqual(actual['x'], ['a', 'b'])
actual = data.copy(deep=True)
actual.coords['z'] = ('z', ['a', 'b'])
self.assertArrayEqual(actual['z'], ['a', 'b'])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data.coords['x'] = ('x', [-1])
actual = data.copy()
del actual.coords['b']
expected = data.reset_coords('b', drop=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(KeyError):
del data.coords['not_found']
with self.assertRaises(KeyError):
del data.coords['foo']
def test_coords_set(self):
one_coord = Dataset({'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
two_coords = Dataset({'zzz': ('x', [2])},
{'x': ('x', [0]),
'yy': ('x', [1])})
all_coords = Dataset(coords={'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
actual = one_coord.set_coords('x')
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords(['x'])
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords('yy')
self.assertDatasetIdentical(two_coords, actual)
actual = one_coord.set_coords(['yy', 'zzz'])
self.assertDatasetIdentical(all_coords, actual)
actual = one_coord.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = two_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords(['yy', 'zzz'])
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords('zzz')
self.assertDatasetIdentical(two_coords, actual)
with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
one_coord.reset_coords('x')
actual = all_coords.reset_coords('zzz', drop=True)
expected = all_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
expected = two_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({'foo': ('y', [-1, 0, 1])}, {'x': 10, 'y': [2, 3, 4]})
expected = Dataset(coords={'x': 10, 'y': [2, 3, 4]})
actual = orig.coords.to_dataset()
self.assertDatasetIdentical(expected, actual)
def test_coords_merge(self):
orig_coords = Dataset(coords={'a': ('x', [1, 2])}).coords
other_coords = Dataset(coords={'b': ('x', ['a', 'b'])}).coords
expected = Dataset(coords={'a': ('x', [1, 2]),
'b': ('x', ['a', 'b'])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': ('x', ['a'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b', 'c'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'a': ('x', [8, 9])}).coords
expected = Dataset(coords={'x': range(2)})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': np.nan}).coords
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
def test_coords_merge_mismatched_shape(self):
orig_coords = Dataset(coords={'a': ('x', [1, 1])}).coords
other_coords = Dataset(coords={'a': 1}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'a': ('y', [1])}).coords
expected = Dataset(coords={'a': (['x', 'y'], [[1], [1]])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected.T, actual)
orig_coords = Dataset(coords={'a': ('x', [np.nan])}).coords
other_coords = Dataset(coords={'a': np.nan}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
def test_equals_and_identical(self):
data = create_test_data(seed=42)
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = create_test_data(seed=42)
data2.attrs['foobar'] = 'baz'
self.assertTrue(data.equals(data2))
self.assertFalse(data.identical(data2))
del data2['time']
self.assertFalse(data.equals(data2))
data = create_test_data(seed=42).rename({'var1': None})
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = data.reset_coords()
self.assertFalse(data2.equals(data))
self.assertFalse(data2.identical(data))
def test_broadcast_equals(self):
data1 = Dataset(coords={'x': 0})
data2 = Dataset(coords={'x': [0]})
self.assertTrue(data1.broadcast_equals(data2))
self.assertFalse(data1.equals(data2))
self.assertFalse(data1.identical(data2))
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
        self.assertEqual(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
def test_isel(self):
data = create_test_data()
slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
ret = data.isel(**slicers)
# Verify that only the specified dimension was altered
self.assertItemsEqual(data.dims, ret.dims)
for d in data.dims:
if d in slicers:
self.assertEqual(ret.dims[d],
np.arange(data.dims[d])[slicers[d]].size)
else:
self.assertEqual(data.dims[d], ret.dims[d])
# Verify that the data is what we expect
for v in data:
self.assertEqual(data[v].dims, ret[v].dims)
self.assertEqual(data[v].attrs, ret[v].attrs)
slice_list = [slice(None)] * data[v].values.ndim
for d, s in iteritems(slicers):
if d in data[v].dims:
inds = np.nonzero(np.array(data[v].dims) == d)[0]
for ind in inds:
slice_list[ind] = s
expected = data[v].values[slice_list]
actual = ret[v].values
np.testing.assert_array_equal(expected, actual)
with self.assertRaises(ValueError):
data.isel(not_a_dim=slice(0, 2))
ret = data.isel(dim1=0)
self.assertEqual({'time': 20, 'dim2': 9, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=0, dim1=0, dim2=slice(5))
self.assertItemsEqual({'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes,
list(ret.indexes) + ['dim1', 'time'])
def test_sel(self):
data = create_test_data()
int_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(2),
'dim3': slice(3)}
loc_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(0, 0.5),
'dim3': slice('a', 'c')}
self.assertDatasetEqual(data.isel(**int_slicers),
data.sel(**loc_slicers))
data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
self.assertDatasetEqual(data.isel(time=0),
data.sel(time='2000-01-01'))
self.assertDatasetEqual(data.isel(time=slice(10)),
data.sel(time=slice('2000-01-01',
'2000-01-10')))
self.assertDatasetEqual(data, data.sel(time=slice('1999', '2005')))
times = pd.date_range('2000-01-01', periods=3)
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=times))
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=(data['time.dayofyear'] <= 3)))
td = pd.to_timedelta(np.arange(3), unit='days')
data = Dataset({'x': ('td', np.arange(3)), 'td': td})
self.assertDatasetEqual(data, data.sel(td=td))
self.assertDatasetEqual(data, data.sel(td=slice('3 days')))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0 days'))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0h'))
self.assertDatasetEqual(data.isel(td=slice(1, 3)),
data.sel(td=slice('1 days', '2 days')))
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3='a')
actual = data.loc[dict(dim3='a')]
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'can only lookup dict'):
data.loc['a']
with self.assertRaises(TypeError):
data.loc[dict(dim3='a')] = 0
def test_reindex_like(self):
data = create_test_data()
data['letters'] = ('dim3', 10 * ['a'])
expected = data.isel(dim1=slice(10), time=slice(13))
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
expected = data.copy(deep=True)
expected['dim3'] = ('dim3', list('cdefghijkl'))
expected['var3'][:-2] = expected['var3'][2:]
expected['var3'][-2:] = np.nan
expected['letters'] = expected['letters'].astype(object)
expected['letters'][-2:] = np.nan
expected['numbers'] = expected['numbers'].astype(float)
expected['numbers'][:-2] = expected['numbers'][2:].values
expected['numbers'][-2:] = np.nan
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
def test_reindex(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.reindex())
expected = data.isel(dim1=slice(10))
actual = data.reindex(dim1=data['dim1'][:10])
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].values)
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].to_index())
self.assertDatasetIdentical(actual, expected)
# test dict-like argument
actual = data.reindex({'dim1': data['dim1'][:10]})
self.assertDatasetIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'cannot specify both'):
data.reindex({'x': 0}, x=0)
with self.assertRaisesRegexp(ValueError, 'dictionary'):
data.reindex('foo')
# out of order
expected = data.sel(dim1=data['dim1'][:10:-1])
actual = data.reindex(dim1=data['dim1'][:10:-1])
self.assertDatasetIdentical(actual, expected)
# regression test for #279
expected = Dataset({'x': ('time', np.random.randn(5))})
time2 = DataArray(np.arange(5), dims="time2")
actual = expected.reindex(time=time2)
self.assertDatasetIdentical(actual, expected)
# another regression test
ds = Dataset({'foo': (['x', 'y'], np.zeros((3, 4)))})
expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 2))),
'x': [0, 1, 3]})
expected['foo'][-1] = np.nan
actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
self.assertDatasetIdentical(expected, actual)
def test_reindex_method(self):
ds = Dataset({'x': ('y', [10, 20])})
y = [-0.5, 0.5, 1.5]
actual = ds.reindex(y=y, method='backfill')
expected = Dataset({'x': ('y', [10, 20, np.nan]), 'y': y})
self.assertDatasetIdentical(expected, actual)
actual = ds.reindex(y=y, method='pad')
expected = Dataset({'x': ('y', [np.nan, 10, 20]), 'y': y})
self.assertDatasetIdentical(expected, actual)
alt = Dataset({'y': y})
actual = ds.reindex_like(alt, method='pad')
self.assertDatasetIdentical(expected, actual)
def test_align(self):
left = create_test_data()
right = left.copy(deep=True)
right['dim3'] = ('dim3', list('cdefghijkl'))
right['var3'][:-2] = right['var3'][2:]
right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
right['numbers'][:-2] = right['numbers'][2:]
right['numbers'][-2:] = -10
intersection = list('cdefghij')
union = list('abcdefghijkl')
left2, right2 = align(left, right, join='inner')
self.assertArrayEqual(left2['dim3'], intersection)
self.assertDatasetIdentical(left2, right2)
left2, right2 = align(left, right, join='outer')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertArrayEqual(left2['dim3'], union)
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='left')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], left['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='right')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], right['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
def test_variable_indexing(self):
data = create_test_data()
v = data['var1']
d1 = data['dim1']
d2 = data['dim2']
self.assertVariableEqual(v, v[d1.values])
self.assertVariableEqual(v, v[d1])
self.assertVariableEqual(v[:3], v[d1 < 3])
self.assertVariableEqual(v[:, 3:], v[:, d2 >= 1.5])
self.assertVariableEqual(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
self.assertVariableEqual(v[:3, :2], v[range(3), range(2)])
self.assertVariableEqual(v[:3, :2], v.loc[d1[:3], d2[:2]])
def test_drop_variables(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.drop([]))
expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
actual = data.drop('time')
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['time'])
self.assertDatasetIdentical(expected, actual)
expected = Dataset(dict((k, data[k]) for
k in ['dim2', 'dim3', 'time', 'numbers']))
actual = data.drop('dim1')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot be found'):
data.drop('not_found_here')
def test_drop_index_labels(self):
data = Dataset({'A': (['x', 'y'], np.random.randn(2, 3)),
'x': ['a', 'b']})
actual = data.drop(1, 'y')
expected = data.isel(y=[0, 2])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a'], 'x')
expected = data.isel(x=[1])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a', 'b'], 'x')
expected = data.isel(x=slice(0, 0))
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(ValueError):
# not contained in axis
data.drop(['c'], dim='x')
def test_copy(self):
data = create_test_data()
for copied in [data.copy(deep=False), copy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIs(v0, v1)
copied['foo'] = ('z', np.arange(5))
self.assertNotIn('foo', data)
for copied in [data.copy(deep=True), deepcopy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIsNot(v0, v1)
def test_rename(self):
data = create_test_data()
newnames = {'var1': 'renamed_var1', 'dim2': 'renamed_dim2'}
renamed = data.rename(newnames)
variables = OrderedDict(data.variables)
for k, v in iteritems(newnames):
variables[v] = variables.pop(k)
for k, v in iteritems(variables):
dims = list(v.dims)
for name, newname in iteritems(newnames):
if name in dims:
dims[dims.index(name)] = newname
self.assertVariableEqual(Variable(dims, v.values, v.attrs),
renamed[k])
self.assertEqual(v.encoding, renamed[k].encoding)
self.assertEqual(type(v), type(renamed.variables[k]))
self.assertTrue('var1' not in renamed)
self.assertTrue('dim2' not in renamed)
with self.assertRaisesRegexp(ValueError, "cannot rename 'not_a_var'"):
data.rename({'not_a_var': 'nada'})
# verify that we can rename a variable without accessing the data
var1 = data['var1']
data['var1'] = (var1.dims, InaccessibleArray(var1.values))
renamed = data.rename(newnames)
with self.assertRaises(UnexpectedDataAccess):
renamed['renamed_var1'].values
def test_rename_inplace(self):
times = pd.date_range('2000-01-01', periods=3)
data = Dataset({'z': ('x', [2, 3, 4]), 't': ('t', times)})
copied = data.copy()
renamed = data.rename({'x': 'y'})
data.rename({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(data, renamed)
self.assertFalse(data.equals(copied))
self.assertEquals(data.dims, {'y': 3, 't': 3})
# check virtual variables
self.assertArrayEqual(data['t.dayofyear'], [1, 2, 3])
def test_update(self):
data = create_test_data(seed=0)
expected = data.copy()
var2 = Variable('dim1', np.arange(8))
actual = data.update({'var2': var2})
expected['var2'] = var2
self.assertDatasetIdentical(expected, actual)
actual = data.copy()
actual_result = actual.update(data, inplace=True)
self.assertIs(actual_result, actual)
self.assertDatasetIdentical(expected, actual)
actual = data.update(data, inplace=False)
expected = data
self.assertIsNot(actual, expected)
self.assertDatasetIdentical(expected, actual)
other = Dataset(attrs={'new': 'attr'})
actual = data.copy()
actual.update(other)
self.assertDatasetIdentical(expected, actual)
def test_update_auto_align(self):
ds = Dataset({'x': ('t', [3, 4])})
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan, 5])})
actual = ds.copy()
other = {'y': ('t', [5]), 't': [1]}
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
actual.update(other)
actual.update(Dataset(other))
self.assertDatasetIdentical(expected, actual)
actual = ds.copy()
other = Dataset({'y': ('t', [5]), 't': [100]})
actual.update(other)
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan] * 2)})
self.assertDatasetIdentical(expected, actual)
def test_merge(self):
data = create_test_data()
ds1 = data[['var1']]
ds2 = data[['var3']]
expected = data[['var1', 'var3']]
actual = ds1.merge(ds2)
self.assertDatasetIdentical(expected, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(expected, actual)
actual = data.merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.reset_coords(drop=True).merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.merge(data.reset_coords(drop=True))
self.assertDatasetIdentical(data, actual)
with self.assertRaises(ValueError):
ds1.merge(ds2.rename({'var3': 'var1'}))
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.reset_coords().merge(data)
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.merge(data.reset_coords())
def test_merge_broadcast_equals(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': ('y', [0, 0])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(ds2, actual)
actual = ds1.copy()
actual.update(ds2)
self.assertDatasetIdentical(ds2, actual)
ds1 = Dataset({'x': np.nan})
ds2 = Dataset({'x': ('y', [np.nan, np.nan])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
def test_merge_compat(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': 1})
for compat in ['broadcast_equals', 'equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': [0, 0]})
for compat in ['equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': ((), 0, {'foo': 'bar'})})
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat='identical')
with self.assertRaisesRegexp(ValueError, 'compat=\S+ invalid'):
ds1.merge(ds2, compat='foobar')
def test_merge_auto_align(self):
ds1 = Dataset({'a': ('x', [1, 2])})
ds2 = Dataset({'b': ('x', [3, 4]), 'x': [1, 2]})
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
self.assertDatasetIdentical(expected, ds1.merge(ds2))
self.assertDatasetIdentical(expected, ds2.merge(ds1))
expected = expected.isel(x=slice(2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='left'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='right'))
expected = expected.isel(x=slice(1, 2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='inner'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='inner'))
def test_getitem(self):
data = create_test_data()
self.assertIsInstance(data['var1'], DataArray)
self.assertVariableEqual(data['var1'], data.variables['var1'])
with self.assertRaises(KeyError):
data['notfound']
with self.assertRaises(KeyError):
data[['var1', 'notfound']]
actual = data[['var1', 'var2']]
expected = Dataset({'var1': data['var1'], 'var2': data['var2']})
self.assertDatasetEqual(expected, actual)
actual = data['numbers']
expected = DataArray(data['numbers'].variable, [data['dim3']],
name='numbers')
self.assertDataArrayIdentical(expected, actual)
actual = data[dict(dim1=0)]
expected = data.isel(dim1=0)
self.assertDatasetIdentical(expected, actual)
def test_virtual_variables(self):
# access virtual variables
data = create_test_data()
expected = DataArray(1 + np.arange(20), coords=[data['time']],
dims='time', name='dayofyear')
self.assertDataArrayIdentical(expected, data['time.dayofyear'])
self.assertArrayEqual(data['time.month'].values,
data.variables['time'].to_index().month)
self.assertArrayEqual(data['time.season'].values, 'DJF')
# test virtual variable math
self.assertArrayEqual(data['time.dayofyear'] + 1, 2 + np.arange(20))
self.assertArrayEqual(np.sin(data['time.dayofyear']),
np.sin(1 + np.arange(20)))
# ensure they become coordinates
expected = Dataset({}, {'dayofyear': data['time.dayofyear']})
actual = data[['time.dayofyear']]
self.assertDatasetEqual(expected, actual)
# non-coordinate variables
ds = Dataset({'t': ('x', pd.date_range('2000-01-01', periods=3))})
self.assertTrue((ds['t.year'] == 2000).all())
def test_time_season(self):
ds = Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})
expected = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
self.assertArrayEqual(expected, ds['t.season'])
def test_slice_virtual_variable(self):
data = create_test_data()
self.assertVariableEqual(data['time.dayofyear'][:10],
Variable(['time'], 1 + np.arange(10)))
self.assertVariableEqual(data['time.dayofyear'][0], Variable([], 1))
def test_setitem(self):
# assign a variable
var = Variable(['dim1'], np.random.randn(8))
data1 = create_test_data()
data1['A'] = var
data2 = data1.copy()
data2['A'] = var
self.assertDatasetIdentical(data1, data2)
# assign a dataset array
dv = 2 * data2['A']
data1['B'] = dv.variable
data2['B'] = dv
self.assertDatasetIdentical(data1, data2)
# can't assign an ND array without dimensions
with self.assertRaisesRegexp(ValueError,
'dimensions .* must have the same len'):
data2['C'] = var.values.reshape(2, 4)
# but can assign a 1D array
data1['C'] = var.values
data2['C'] = ('C', var.values)
self.assertDatasetIdentical(data1, data2)
# can assign a scalar
data1['scalar'] = 0
data2['scalar'] = ([], 0)
self.assertDatasetIdentical(data1, data2)
# can't use the same dimension name as a scalar var
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data1['newvar'] = ('scalar', [3, 4, 5])
# can't resize a used dimension
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data1['dim1'] = data1['dim1'][:5]
# override an existing value
data1['A'] = 3 * data2['A']
self.assertVariableEqual(data1['A'], 3 * data2['A'])
with self.assertRaises(NotImplementedError):
data1[{'x': 0}] = 0
def test_setitem_auto_align(self):
ds = Dataset()
ds['x'] = ('y', range(3))
ds['y'] = 1 + np.arange(3)
expected = Dataset({'x': ('y', range(3)), 'y': 1 + np.arange(3)})
self.assertDatasetIdentical(ds, expected)
ds['y'] = DataArray(range(3), dims='y')
expected = Dataset({'x': ('y', range(3))})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([1, 2], dims='y')
expected = Dataset({'x': ('y', [1, 2, np.nan])})
self.assertDatasetIdentical(ds, expected)
ds['x'] = 42
expected = Dataset({'x': 42, 'y': range(3)})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([4, 5, 6, 7], dims='y')
expected = Dataset({'x': ('y', [4, 5, 6])})
self.assertDatasetIdentical(ds, expected)
def test_delitem(self):
data = create_test_data()
all_items = set(data)
self.assertItemsEqual(data, all_items)
del data['var1']
self.assertItemsEqual(data, all_items - set(['var1']))
del data['dim1']
self.assertItemsEqual(data, set(['time', 'dim2', 'dim3', 'numbers']))
self.assertNotIn('dim1', data.dims)
self.assertNotIn('dim1', data.coords)
def test_squeeze(self):
data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
for args in [[], [['x']], [['x', 'z']]]:
def get_args(v):
return [set(args[0]) & set(v.dims)] if args else []
expected = Dataset(dict((k, v.squeeze(*get_args(v)))
for k, v in iteritems(data.variables)))
expected.set_coords(data.coords, inplace=True)
self.assertDatasetIdentical(expected, data.squeeze(*args))
# invalid squeeze
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
data.squeeze('y')
def test_groupby(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))},
{'x': ('x', list('abc')),
'c': ('x', [0, 1, 0])})
groupby = data.groupby('x')
self.assertEqual(len(groupby), 3)
expected_groups = {'a': 0, 'b': 1, 'c': 2}
self.assertEqual(groupby.groups, expected_groups)
expected_items = [('a', data.isel(x=0)),
('b', data.isel(x=1)),
('c', data.isel(x=2))]
for actual, expected in zip(groupby, expected_items):
self.assertEqual(actual[0], expected[0])
self.assertDatasetEqual(actual[1], expected[1])
identity = lambda x: x
for k in ['x', 'c', 'y']:
actual = data.groupby(k, squeeze=False).apply(identity)
self.assertDatasetEqual(data, actual)
def test_groupby_returns_new_type(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))})
actual = data.groupby('x').apply(lambda ds: ds['z'])
expected = data['z']
self.assertDataArrayIdentical(expected, actual)
actual = data['z'].groupby('x').apply(lambda x: x.to_dataset())
expected = data
self.assertDatasetIdentical(expected, actual)
def test_groupby_iter(self):
data = create_test_data()
for n, (t, sub) in enumerate(list(data.groupby('dim1'))[:3]):
self.assertEqual(data['dim1'][n], t)
self.assertVariableEqual(data['var1'][n], sub['var1'])
self.assertVariableEqual(data['var2'][n], sub['var2'])
self.assertVariableEqual(data['var3'][:, n], sub['var3'])
def test_groupby_errors(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'must be 1 dimensional'):
data.groupby('var1')
with self.assertRaisesRegexp(ValueError, 'must have a name'):
data.groupby(np.arange(10))
with self.assertRaisesRegexp(ValueError, 'length does not match'):
data.groupby(data['dim1'][:3])
with self.assertRaisesRegexp(ValueError, "must have a 'dims'"):
data.groupby(data.coords['dim1'].to_index())
def test_groupby_reduce(self):
data = Dataset({'xy': (['x', 'y'], np.random.randn(3, 4)),
'xonly': ('x', np.random.randn(3)),
'yonly': ('y', np.random.randn(4)),
'letters': ('y', ['a', 'a', 'b', 'b'])})
expected = data.mean('y')
actual = data.groupby('x').mean()
self.assertDatasetAllClose(expected, actual)
actual = data.groupby('x').mean('y')
self.assertDatasetAllClose(expected, actual)
letters = data['letters']
expected = Dataset({'xy': data['xy'].groupby(letters).mean(),
'xonly': data['xonly'].mean(),
'yonly': data['yonly'].groupby(letters).mean()})
actual = data.groupby('letters').mean()
self.assertDatasetAllClose(expected, actual)
def test_groupby_math(self):
reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
ds = create_test_data()
for squeeze in [True, False]:
grouped = ds.groupby('dim1', squeeze=squeeze)
expected = reorder_dims(ds + ds.coords['dim1'])
actual = grouped + ds.coords['dim1']
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds.coords['dim1'] + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
ds2 = 2 * ds
expected = reorder_dims(ds + ds2)
actual = grouped + ds2
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds2 + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
grouped = ds.groupby('numbers')
zeros = DataArray([0, 0, 0, 0], [('numbers', range(4))])
expected = ds
actual = grouped + zeros
self.assertDatasetEqual(expected, actual)
actual = zeros + grouped
self.assertDatasetEqual(expected, actual)
with self.assertRaisesRegexp(TypeError, 'only support arithmetic'):
grouped + 1
with self.assertRaisesRegexp(TypeError, 'only support arithmetic'):
grouped + grouped
def test_groupby_math_virtual(self):
ds = Dataset({'x': ('t', [1, 2, 3])},
                     {'t': pd.date_range('20100101', periods=3)})
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.analysis import analysis
from quetzal.engine import engine, nested_logit
from quetzal.engine.park_and_ride_pathfinder import ParkRidePathFinder
from quetzal.engine.pathfinder import PublicPathFinder
from quetzal.engine.road_pathfinder import RoadPathFinder
from quetzal.model import model, optimalmodel, parkridemodel
from syspy.assignment import raw as raw_assignment
from syspy.assignment.raw import fast_assign as assign
from syspy.skims import skims
from tqdm import tqdm
def read_hdf(filepath):
m = TransportModel()
m.read_hdf(filepath)
return m
def read_json(folder, **kwargs):
m = TransportModel()
m.read_json(folder, **kwargs)
return m
track_args = model.track_args
log = model.log
class TransportModel(optimalmodel.OptimalModel, parkridemodel.ParkRideModel):
@track_args
def step_distribution(
self,
segmented=False,
deterrence_matrix=None,
**od_volume_from_zones_kwargs
):
"""
* requires: zones
* builds: volumes
:param deterrence_matrix: an OD unstaked dataframe representing the disincentive to
travel as distance/time/cost increases.
:param od_volume_from_zones_kwargs: if the friction matrix is not
provided, it will be automatically computed using a gravity distribution which
uses the following parameters:
* param power: (int) the gravity exponent
* param intrazonal: (bool) set the intrazonal distance to 0 if False,
compute a characteristic distance otherwise.
if segmented=True: all parameters must be given in dict {segment: param}
"""
if segmented:
self.volumes = pd.DataFrame(columns=['origin', 'destination'])
kwargs = od_volume_from_zones_kwargs.copy()
if 'deterrence_matrix' not in kwargs.keys():
kwargs['deterrence_matrix'] = deterrence_matrix if deterrence_matrix is not None else {}
if 'power' not in kwargs.keys():
kwargs['power'] = {}
if 'intrazonal' not in kwargs.keys():
kwargs['intrazonal'] = {}
for segment in self.segments:
print(segment)
cols = ['geometry', (segment, 'emission'), (segment, 'attraction')]
if 'area' in self.zones:
cols += ['area']
segment_zones = self.zones[cols].rename(
columns={
(segment, 'emission'): 'emission',
(segment, 'attraction'): 'attraction'
}
)
segment_volumes = engine.od_volume_from_zones(
segment_zones,
deterrence_matrix=kwargs['deterrence_matrix'].get(segment, None),
coordinates_unit=self.coordinates_unit,
power=kwargs['power'].get(segment, 2),
intrazonal=kwargs['intrazonal'].get(segment, False)
)
segment_volumes.rename(columns={'volume': segment}, inplace=True)
self.volumes = self.volumes.merge(
segment_volumes,
on=['origin', 'destination'],
how='outer'
)
self.volumes['all'] = self.volumes[self.segments].T.sum()
else:
self.volumes = engine.od_volume_from_zones(
self.zones,
deterrence_matrix,
coordinates_unit=self.coordinates_unit,
**od_volume_from_zones_kwargs
)
@track_args
def step_pathfinder(
self,
walk_on_road=False,
complete=True,
**kwargs
):
"""
* requires: links, footpaths, zone_to_transit, zone_to_road
* builds: pt_los
"""
assert self.links['time'].isnull().sum() == 0
self.links = engine.graph_links(self.links)
self.walk_on_road = walk_on_road
if walk_on_road:
footpaths = self.road_links.copy()
footpaths['time'] = footpaths['walk_time']
ntlegs = self.zone_to_road
nodes = self.road_nodes
else:
footpaths = self.footpaths
ntlegs = self.zone_to_transit
nodes = self.nodes
# TODO even with walk on road, transit nodes may not belong to road_nodes
self.pt_los, self.graph = engine.path_and_duration_from_links_and_ntlegs(
self.links,
ntlegs=ntlegs,
pole_set=set(self.zones.index),
footpaths=footpaths,
**kwargs
)
if complete:
self.pt_los = analysis.path_analysis_od_matrix(
od_matrix=self.pt_los,
links=self.links,
nodes=nodes,
centroids=self.centroids,
)
@track_args
def step_road_pathfinder(self, maxiters=1, *args, **kwargs):
"""
* requires: zones, road_links, zone_to_road
* builds: car_los, road_links
"""
roadpathfinder = RoadPathFinder(self)
roadpathfinder.frank_wolfe(maxiters=maxiters, *args, **kwargs)
self.car_los = roadpathfinder.car_los
self.road_links = roadpathfinder.road_links
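    # For reference, a classic Frank-Wolfe step (illustrative pseudo-code, not
    # necessarily the RoadPathFinder internals) blends an all-or-nothing
    # assignment on current travel times into the running link flows,
    #     flow = (1 - alpha) * flow + alpha * all_or_nothing(times(flow))
    # with a step size such as alpha = 1 / (iteration + 1); maxiters therefore
    # trades equilibrium accuracy against runtime.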
@track_args
def step_pr_pathfinder(
self,
force=False,
path_analysis=True,
**kwargs
):
if not force:
sets = ['nodes', 'links', 'zones', 'road_nodes', 'road_links']
self.integrity_test_collision(sets)
self.links = engine.graph_links(self.links)
parkridepathfinder = ParkRidePathFinder(self)
parkridepathfinder.find_best_path(**kwargs)
self.pr_los = parkridepathfinder.paths
if path_analysis:
analysis_nodes = pd.concat([self.nodes, self.road_nodes])
analysis_links = pd.concat([self.links, self.road_links])
self.pr_los = analysis.path_analysis_od_matrix(
od_matrix=self.pr_los,
links=self.links,
nodes=analysis_nodes,
centroids=self.centroids,
            )  # analysis not verified, taken directly from pt_los
@track_args
def step_pt_pathfinder(
self,
broken_routes=True,
broken_modes=True,
route_column='route_id',
mode_column='route_type',
boarding_time=None,
speedup=False,
walk_on_road=False,
# keep_graph=False,
keep_pathfinder=False,
force=False,
path_analysis=True,
**kwargs
):
"""
* requires: zones, links, footpaths, zone_to_road, zone_to_transit
* builds: pt_los
"""
sets = ['nodes', 'links', 'zones']
if walk_on_road:
sets += ['road_nodes', 'road_links']
if not force:
self.integrity_test_collision(sets)
self.links = engine.graph_links(self.links)
publicpathfinder = PublicPathFinder(self, walk_on_road=walk_on_road)
publicpathfinder.find_best_paths(
broken_routes=broken_routes,
broken_modes=broken_modes,
route_column=route_column,
mode_column=mode_column,
speedup=speedup,
boarding_time=boarding_time,
**kwargs
)
# if keep_graph:
# self.nx_graph=publicpathfinder.nx_graph
if keep_pathfinder:
self.publicpathfinder = publicpathfinder
self.pt_los = publicpathfinder.paths
analysis_nodes = pd.concat([self.nodes, self.road_nodes]) if walk_on_road else self.nodes
if path_analysis:
self.pt_los = analysis.path_analysis_od_matrix(
od_matrix=self.pt_los,
links=self.links,
nodes=analysis_nodes,
centroids=self.centroids,
)
@track_args
def step_concatenate_los(self):
"""
* requires: pt_los, car_los
* builds: los
"""
pass
@track_args
def step_modal_split(self, build_od_stack=True, **modal_split_kwargs):
"""
* requires: volumes, los
* builds: od_stack, shared
:param modal_split_kwargs: kwargs of engine.modal_split
example:
::
sm.step_modal_split(
time_scale=1/1800,
alpha_car=2,
beta_car=600
)
"""
shared = engine.modal_split_from_volumes_and_los(
self.volumes,
self.los,
**modal_split_kwargs
)
# shared['distance_car'] = shared['distance']
if build_od_stack:
self.od_stack = analysis.volume_analysis_od_matrix(shared)
self.shared = shared
def compute_los_volume(self, time_expanded=False, keep_segments=True):
los = self.los if not time_expanded else self.te_los
segments = self.segments
probabilities = [(segment, 'probability') for segment in segments]
shared_cols = list(set(self.volumes.columns).intersection(set(los.columns)))
on = [col for col in shared_cols if col in ['origin', 'destination', 'wished_departure_time']]
left = los[on + probabilities]
left['index'] = left.index
df = pd.merge(left, self.volumes, on=on).set_index('index')
df = df.reindex(los.index)
values = df[probabilities].values * df[segments].values
i = 0
for segment in segments:
los[segment] = values.T[i]
i += 1
los['volume'] = np.nansum(values, axis=1)
if time_expanded:
los_volumes = self.te_los.groupby('path_id')[['volume'] + segments].sum()
path_id_list = list(self.los['path_id'])
volume_values = los_volumes.reindex(path_id_list).fillna(0).values
for c in los_volumes.columns:
self.los[c] = np.nan # create_columns
self.los.loc[:, los_volumes.columns] = volume_values
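    # The per-path volume computed above reduces to (sketch with toy numbers):
    #     volume(path) = sum over segments of P(path | segment, OD) * demand(segment, OD)
    # e.g. probabilities (0.6, 0.4) and segment demands (100, 50) give
    # 0.6 * 100 + 0.4 * 50 = 80 trips on that path.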
def step_assignment(
self,
road=False,
boardings=False,
boarding_links=False,
alightings=False,
alighting_links=False,
transfers=False,
segmented=False,
time_expanded=False,
compute_los_volume=True
):
if compute_los_volume:
self.compute_los_volume(time_expanded=time_expanded)
los = self.los.copy()
column = 'link_path'
l = los.dropna(subset=[column])
l = l.loc[l['volume'] > 0]
self.links['volume'] = assign(l['volume'], l[column])
if road:
self.road_links[('volume', 'car')] = assign(l['volume'], l[column])
if 'road_link_list' in self.links.columns:
to_assign = self.links.dropna(subset=['volume', 'road_link_list'])
self.road_links[('volume', 'pt')] = assign(
to_assign['volume'],
to_assign['road_link_list']
)
if boardings and not boarding_links:
            print('to assign boardings on links, pass boarding_links=True')
if boarding_links:
column = 'boarding_links'
l = los.dropna(subset=[column])
self.links['boardings'] = assign(l['volume'], l[column])
if boardings:
column = 'boardings'
l = los.dropna(subset=[column])
self.nodes['boardings'] = assign(l['volume'], l[column])
if alighting_links:
column = 'alighting_links'
l = los.dropna(subset=[column])
self.links['alightings'] = assign(l['volume'], l[column])
if alightings:
column = 'alightings'
l = los.dropna(subset=[column])
self.nodes['alightings'] = assign(l['volume'], l[column])
if transfers:
column = 'transfers'
l = los.dropna(subset=[column])
self.nodes['transfers'] = assign(l['volume'], l[column])
if segmented:
self.segmented_assigment(
road=road,
boardings=boardings, alightings=alightings, transfers=transfers,
aggregated_los=los
)
def segmented_assigment(
self,
road=False,
boardings=False,
alightings=False,
transfers=False,
aggregated_los=None
):
los = aggregated_los if aggregated_los is not None else self.los
for segment in self.segments:
column = 'link_path'
l = los.dropna(subset=[column])
self.links[segment] = assign(l[segment], l[column])
if road:
self.road_links[(segment, 'car')] = assign(l[segment], l[column])
self.road_links[(segment, 'pt')] = assign(
self.links[segment],
self.links['road_link_list']
)
if boardings:
column = 'boarding_links'
l = los.dropna(subset=[column])
self.links[(segment, 'boardings')] = assign(l[segment], l[column])
column = 'boardings'
l = los.dropna(subset=[column])
self.nodes[(segment, 'boardings')] = assign(l[segment], l[column])
if alightings:
column = 'alighting_links'
l = los.dropna(subset=[column])
self.links[(segment, 'alightings')] = assign(l[segment], l[column])
column = 'alightings'
l = los.dropna(subset=[column])
self.nodes[(segment, 'alightings')] = assign(l[segment], l[column])
if transfers:
column = 'transfers'
l = los.dropna(subset=[column])
self.nodes[(segment, 'transfers')] = assign(l[segment], l[column])
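    # assign / fast_assign from syspy.assignment is used above as a
    # scatter-add: each path's value is added to every link or node id in the
    # corresponding list. Toy sketch (invented ids and volumes):
    #     paths   = [['l1', 'l2'], ['l2', 'l3']]
    #     volumes = [10, 5]
    #     -> loads {'l1': 10, 'l2': 15, 'l3': 5}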
@track_args
def step_pt_assignment(
self,
volume_column=None,
on_road_links=False,
split_by=None,
**kwargs
):
"""
Assignment step
* requires: links, nodes, pt_los, road_links, volumes, path_probabilities
* builds: loaded_links, loaded_nodes, add load to road_links
:param volume_column: volume column of self.volumes to assign. If none, all columns will be assigned
:param on_road_links: if True, performs pt assignment on road_links as well
:param split_by: path categories to be tracked in the assignment. Must be a column of self.pt_los
example:
::
            sm.step_pt_assignment(
volume_column=None,
on_road_links=False,
split_by='route_type',
boardings=True,
alightings=True,
transfers=True
            )
"""
if volume_column is None:
self.segmented_pt_assignment(
on_road_links=on_road_links,
split_by=split_by,
**kwargs
)
return
# When split_by is not None, this call could be replaced by a sum, provided
# prior dumb definition of loaded_links and loaded_nodes
self.loaded_links, self.loaded_nodes = engine.loaded_links_and_nodes(
self.links,
self.nodes,
volumes=self.volumes,
path_finder_stack=self.pt_los,
volume_column=volume_column,
**kwargs
)
# Rename columns
self.loaded_links.rename(columns={volume_column: ('load', volume_column)}, inplace=True)
self.loaded_nodes.rename(columns={volume_column: ('load', volume_column)}, inplace=True)
for col in list(set(['boardings', 'alightings', 'transfers']).intersection(kwargs.keys())):
self.loaded_links.rename(columns={col: (col, volume_column)}, inplace=True)
self.loaded_nodes.rename(columns={col: (col, volume_column)}, inplace=True)
# Group assignment
if split_by is not None:
groups = self.pt_los[split_by].unique()
for group in groups:
# TODO remove rows with empty link_path
group_pt_los = self.pt_los.loc[self.pt_los[split_by] == group]
group_loaded_links, group_loaded_nodes = engine.loaded_links_and_nodes(
self.links,
self.nodes,
volumes=self.volumes,
path_finder_stack=group_pt_los,
volume_column=volume_column,
**kwargs
)
# Append results columns
self.loaded_links[('load', volume_column, group)] = group_loaded_links[volume_column]
self.loaded_nodes[('load', volume_column, group)] = group_loaded_nodes[volume_column]
for col in list(set(['boardings', 'alightings', 'transfers']).intersection(kwargs.keys())):
self.loaded_links[(col, volume_column, group)] = group_loaded_links[col]
self.loaded_nodes[(col, volume_column, group)] = group_loaded_nodes[col]
# Assignment on road_links
if on_road_links:
if 'road_link_path' not in self.pt_los.columns:
# create road_link_path column from networkcasted linkss if not already defined
self._analysis_road_link_path()
merged = pd.merge(self.pt_los, self.volumes, on=['origin', 'destination'])
merged['to_assign'] = merged[(volume_column, 'probability')] * merged[volume_column].fillna(0)
if split_by is not None:
def assign_group(g):
x = g.reset_index()
result = raw_assignment.assign(x['to_assign'], x['road_link_path'])
return result
group_assigned = merged.groupby(split_by).apply(assign_group)
assigned = group_assigned.unstack().T.loc['volume'].fillna(0)
# Add empty groups
for empty in list(set(groups).difference(set(assigned.columns))):
assigned[empty] = 0
self.road_links[[(volume_column, col) for col in groups]] = assigned[[col for col in groups]]
self.road_links[volume_column] = assigned.T.sum()
else: # no groups
assigned = raw_assignment.assign(merged['to_assign'], merged['road_link_path'])
self.road_links[volume_column] = assigned['volume']
# todo remove 'load' from analysis module:
self.road_links['load'] = self.road_links[volume_column]
def segmented_pt_assignment(self, split_by=None, on_road_links=False, *args, **kwargs):
"""
Performs pt assignment for all demand segments.
Requires computed path probabilities in pt_los for each segment.
"""
segments = self.segments
iterator = tqdm(segments)
for segment in iterator:
iterator.desc = str(segment)
# Assign demand segment
self.step_pt_assignment(
volume_column=segment,
path_pivot_column=(segment, 'probability'),
split_by=split_by,
on_road_links=on_road_links,
**kwargs
)
# Update links and nodes to keep results as loaded links and nodes
# are erased at each call of step_pt_assignment
self.links = self.loaded_links
self.nodes = self.loaded_nodes
# Group assignment results: sum over demand segments
try:
groups = self.pt_los[split_by].unique()
except KeyError:
groups = []
cols = ['load']
# Add boardings, alightings and transfers if processed
cols += list(set(['boardings', 'alightings', 'transfers']).intersection(kwargs.keys()))
for col in cols:
for g in groups:
columns = [tuple([col, s, g]) for s in segments]
name = tuple([col, g])
self.loaded_links[name] = self.loaded_links[columns].T.sum()
self.loaded_links.drop(columns, 1, inplace=True)
self.loaded_nodes[name] = self.loaded_nodes[columns].T.sum()
self.loaded_nodes.drop(columns, 1, inplace=True)
columns = [tuple([col, s]) for s in segments]
self.loaded_links[col] = self.loaded_links[columns].T.sum()
self.loaded_links.drop(columns, 1, inplace=True)
self.loaded_nodes[col] = self.loaded_nodes[columns].T.sum()
self.loaded_nodes.drop(columns, 1, inplace=True)
if on_road_links:
for group in groups:
self.road_links[('all', group)] = self.road_links[[(s, group) for s in segments]].T.sum()
self.road_links.drop([(s, group) for s in segments], 1, inplace=True)
self.road_links['load'] = self.road_links[[s for s in segments]].T.sum()
self.road_links.drop([s for s in segments], 1, inplace=True)
def step_car_assignment(self, volume_column=None):
"""
Assignment step
* requires: road_links, car_los, road_links, volumes, path_probabilities
* builds: loaded_road_links
"""
if volume_column is None:
self.segmented_car_assignment()
def segmented_car_assignment(self):
segments = self.segments
iterator = tqdm(segments)
for segment in iterator:
iterator.desc = str(segment)
merged = pd.merge(self.car_los, self.volumes, on=['origin', 'destination'])
merged['to_assign'] = merged[(segment, 'probability')] * merged[segment].fillna(0)
assigned = raw_assignment.assign(merged['to_assign'], merged['link_path']).fillna(0)
self.road_links[(segment, 'car')] = assigned
columns = [(segment, 'car') for segment in self.segments]
self.road_links[('all', 'car')] = self.road_links[columns].T.sum()
# TODO Merge conflict: TO CHECK WITH ACCRA
# self.road_links.drop(columns, 1, inplace=True)
# if not 'load' in self.road_links.columns:
# self.road_links['load'] = 0
# self.road_links['load'] += self.road_links[('all','car')]
# TODO move all utility features to another object / file
def analysis_mode_utility(self, how='min', segment=None, segments=None, time_expanded=False):
"""
* requires: mode_utility, los, utility_values
* builds: los
"""
if segment is None:
for segment in tqdm(self.segments):
self.analysis_mode_utility(how=how, segment=segment, time_expanded=time_expanded)
return
if time_expanded:
logit_los = self.te_los
else:
logit_los = self.los
mode_utility = self.mode_utility[segment].to_dict()
if how == 'main': # the utility of the 'route_type' is used
logit_los['mode_utility'] = logit_los['route_type'].apply(mode_utility.get)
        else:  # how = 'min', 'max', 'mean', 'sum'
# route type utilities
rtu = {
rt: get_combined_mode_utility(
rt, how=how, mode_utility=mode_utility
)
for rt in logit_los['route_types'].unique()
}
logit_los['mode_utility'] = logit_los['route_types'].map(rtu.get)
utility_values = self.utility_values[segment].to_dict()
u = 0
for key, value in utility_values.items():
u += value * logit_los[key]
logit_los[(segment, 'utility')] = u
        logit_los[(segment, 'utility')] = logit_los[(segment, 'utility')].astype(float)
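    # The utility assembled above is a linear combination of level-of-service
    # columns; schematically (coefficient names are assumptions and depend on
    # what self.utility_values actually contains):
    #     U(segment) = beta_time * time + beta_price * price
    #                  + beta_mode * mode_utility + ...
    # so a route_type with a higher mode_utility simply receives a constant
    # bonus in any downstream logit-style choice.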
def analysis_utility(self, segment='root', time_expanded=False, how='min'):
"""
* requires: mode_utility, los, utility_values
* builds: los
"""
if segment is None:
for segment in self.segments:
print(segment)
self.analysis_mode_utility(how=how, segment=segment, time_expanded=time_expanded)
return
if time_expanded:
los = self.te_los
else:
los = self.los
utility_values = self.utility_values[segment].to_dict()
u = 0
for key, value in utility_values.items():
u += value * los[key]
los[(segment, 'utility')] = u
los[(segment, 'utility')] = los[(segment, 'utility')].astype(float)
def initialize_logit(self):
zones = list(self.zones.index)
        od = pd.DataFrame(index=pd.MultiIndex.from_product([zones, zones]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: provide 24hr feedback to clinicians
version: 0.0.1
created: 2018-08-01
author: <NAME>
dependencies:
* requires tidepool-analytics-env (see readme for instructions)
* requires a clinician or study username (email) and password
* requires tidals (tidepool data analytics tools)
license: BSD-2-Clause
"""
# %% REQUIRED LIBRARIES
import pandas as pd
import numpy as np
import os
import sys
import requests
import json
import argparse
import getpass
from pytz import timezone
from datetime import timedelta
import datetime as dt
import subprocess as sub
tidalsPath = os.path.abspath(os.path.join(__file__, "..", "..", "..", "tidals"))
if tidalsPath not in sys.path:
sys.path.insert(0, tidalsPath)
import tidals as td
envPath = os.path.abspath(os.path.join(__file__, "..", "..", "..",
"get-qualify-export-donor-data"))
if envPath not in sys.path:
sys.path.insert(0, envPath)
import environmentalVariables
# %% USER INPUTS
codeDescription = "Provide feedback of last 24 hours (6am to 6am) to clinicians"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument("-d",
"--date-stamp",
dest="dateStamp",
default=dt.datetime.now().strftime("%Y-%m-%d"),
help="date of the daily report, defaults to current date")
parser.add_argument("-a",
"--accountAlias",
dest="accountAlias",
default=np.nan,
help="enter an account alias so the master clinician or study account" +
"can be looked up in your environmental variables, OR leave this blank" +
"and you will be prompted to enter in account credentials")
parser.add_argument("-o",
"--output-data-path",
dest="outputPath",
default=os.path.abspath(os.path.join(".", "data")),
help="the output path where the data is stored")
parser.add_argument("-v",
"--verbose",
dest="verboseOutput",
default=True,
help="True if you want script progress to print to the console")
args = parser.parse_args()
# %% CHECK/DECLARE INPUTS AND OUTPUT VARIABLES
if pd.isnull(args.accountAlias):
os.environ["TEMP_EMAIL"] = getpass.getpass(prompt="email: ")
os.environ["TEMP_PASSWORD"] = getpass.getpass(prompt="password: ")
if (pd.isnull(os.environ["TEMP_EMAIL"]) | pd.isnull(os.environ["TEMP_PASSWORD"])):
sys.exit("error in entering user email and password")
else:
os.environ["TEMP_EMAIL"] = os.environ[args.accountAlias + "_EMAIL"]
os.environ["TEMP_PASSWORD"] = os.environ[args.accountAlias + "_PASSWORD"]
# create output folder if it doesn't exist
if not os.path.isdir(args.outputPath):
os.makedirs(args.outputPath)
# create a report output folder if it doesn't exist
reportDate = args.dateStamp
reportPath = os.path.join(args.outputPath, "reports")
reportOutputPath = os.path.join(reportPath, reportDate)
if not os.path.isdir(reportPath):
os.makedirs(reportPath)
os.makedirs(reportOutputPath)
indvidualDataFolder = os.path.join(reportOutputPath, "individual-data-files")
if not os.path.isdir(indvidualDataFolder):
os.makedirs(indvidualDataFolder)
# create a metadata output folder if it doesn't exist
metadataPath = os.path.join(args.outputPath, "metadata", reportDate)
jsonDataPath = os.path.join(metadataPath, "jsonData")
if not os.path.isdir(metadataPath):
os.makedirs(metadataPath)
os.makedirs(jsonDataPath)
allStats = pd.DataFrame()
metaData = pd.DataFrame(columns=["userID",
"studyID",
"getData.response1",
"getData.response2",
"nDuplicatesRemoved"])
# %% FUNCTIONS
def get_stats(df):
statDF = pd.DataFrame(index=[0])
statDF["totalNumberCBGValues"] = df.mg_dL.count()
statDF["mean_mgdL"] = df.mg_dL.mean()
statDF["std_mgdL"] = df.mg_dL.std()
statDF["cov_mgdL"] = statDF["std_mgdL"] / statDF["mean_mgdL"]
statDF["totalBelow54"] = sum(df.mg_dL < 54)
statDF["totalBelow70"] = sum(df.mg_dL < 70)
statDF["total54to70"] = sum((df.mg_dL >= 54) & (df.mg_dL < 70))
statDF["total70to140"] = sum((df.mg_dL >= 70) & (df.mg_dL <= 140))
statDF["total70to180"] = sum((df.mg_dL >= 70) & (df.mg_dL <= 180))
statDF["total180to250"] = sum((df.mg_dL > 180) & (df.mg_dL <= 250))
statDF["totalAbove180"] = sum(df.mg_dL > 180)
statDF["totalAbove250"] = sum(df.mg_dL > 250)
statDF["percentBelow54"] = statDF["totalBelow54"] / statDF["totalNumberCBGValues"]
statDF["percentBelow70"] = statDF["totalBelow70"] / statDF["totalNumberCBGValues"]
statDF["percent70to140"] = statDF["total70to140"] / statDF["totalNumberCBGValues"]
statDF["percent70to180"] = statDF["total70to180"] / statDF["totalNumberCBGValues"]
statDF["percentAbove180"] = statDF["totalAbove180"] / statDF["totalNumberCBGValues"]
statDF["percentAbove250"] = statDF["totalAbove250"] / statDF["totalNumberCBGValues"]
statDF["min_mgdL"] = df.mg_dL.min()
statDF["median_mgdL"] = df.mg_dL.describe()["50%"]
statDF["max_mgdL"] = df.mg_dL.max()
# calculate the start and end time of the cbg data
startTime = df["localTime"].min()
statDF["startTime"] = startTime
endTime = df["localTime"].max()
statDF["endTime"] = endTime
statDF["totalNumberPossibleCBGvalues"] = len(pd.date_range(startTime, endTime, freq="5min"))
# feedback criteria
# A. incomplete dataset
statDF["percentOfExpectedData"] = \
(((endTime - startTime).days * 86400) +
((endTime - startTime).seconds)) / (86400 - (5*60))
if statDF.loc[0, "percentOfExpectedData"] < 0.834: # greater than 4 hours of expected data
statDF["GTE4hoursNoCgmSignal"] = "NA"
statDF["incompleteDataset"] = "FLAG (" + \
str(round(statDF.loc[0, "percentOfExpectedData"] * 100, 1)) + "%)"
else:
statDF["incompleteDataset"] = np.nan
# 1. >=4 hours without CGM signal
missingCgm = statDF["totalNumberPossibleCBGvalues"] - statDF["totalNumberCBGValues"]
if missingCgm[0] > (4 * 60 / 5):
statDF["GTE4hoursNoCgmSignal"] = "FLAG"
else:
statDF["GTE4hoursNoCgmSignal"] = np.nan
# 2. >= 2 hours 54 <= BG < 70 mg/dl
if statDF.loc[0, "total54to70"] > (2 * 60 / 5):
statDF["GTE2hoursBetween54to70"] = \
"FLAG (" + str(round(statDF.loc[0, "total54to70"] * 5)) + "min)"
else:
statDF["GTE2hoursBetween54to70"] = np.nan
    # 3. >= 15 minutes < 54 mg/dl
if statDF.loc[0, "totalBelow54"] > (15 / 5):
statDF["GTE15minBelow54"] = "FLAG (" + str(round(statDF.loc[0, "totalBelow54"] * 5)) + "min)"
else:
statDF["GTE15minBelow54"] = np.nan
return statDF
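# Toy illustration of the time-in-range arithmetic used by get_stats above.
# The readings are invented values (mg/dL), not study data, and this helper is
# never called by the report pipeline.
def example_time_in_range(readings=None):
    if readings is None:
        readings = pd.Series([48, 62, 85, 110, 150, 190, 260])
    total = readings.count()
    return {
        "percentBelow54": (readings < 54).sum() / total,
        "percentBelow70": (readings < 70).sum() / total,
        "percent70to180": ((readings >= 70) & (readings <= 180)).sum() / total,
        "percentAbove180": (readings > 180).sum() / total,
        "percentAbove250": (readings > 250).sum() / total,
    }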
def sort_and_pretty_stat_output(df):
for col in list(df):
if (("percent" in col) | ("cov" in col)):
df[col] = round(df[col] * 100, 1)
for col in ["mean_mgdL", "std_mgdL"]:
df[col] = round(df[col], 1)
df = df[["studyID",
"incompleteDataset",
"GTE4hoursNoCgmSignal",
"GTE2hoursBetween54to70",
"GTE15minBelow54",
"totalNumberCBGValues",
"totalNumberPossibleCBGvalues",
"startTime",
"endTime",
"percentOfExpectedData",
"mean_mgdL",
"std_mgdL",
"cov_mgdL",
"min_mgdL",
"median_mgdL",
"max_mgdL",
"percentBelow54",
"percentBelow70",
"percent70to140",
"percent70to180",
"percentAbove180",
"percentAbove250",
"totalBelow54",
"totalBelow70",
"total54to70",
"total70to140",
"total70to180",
"total180to250",
"totalAbove180",
"totalAbove250"]]
return df
def get_timeZoneOffset(currentDate, userTz):
tz = timezone(userTz)
tzoNum = int(tz.localize(pd.to_datetime(currentDate) + timedelta(days=1)).strftime("%z"))
tzoHours = np.floor(tzoNum / 100)
tzoMinutes = round((tzoNum / 100 - tzoHours) * 100, 0)
tzoSign = np.sign(tzoHours)
tzo = int((tzoHours * 60) + (tzoMinutes * tzoSign))
return tzo
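# For example, get_timeZoneOffset("2018-08-01", "US/Pacific") should return
# -420 (UTC-7 expressed in minutes), and a half-hour zone such as
# "Asia/Kolkata" yields 330.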
def get_donor_lists(email, password, outputDonorList):
p = sub.Popen(["getusers", email,
"-p", password, "-o",
outputDonorList, "-v"], stdout=sub.PIPE, stderr=sub.PIPE)
output, errors = p.communicate()
output = output.decode("utf-8")
errors = errors.decode("utf-8")
if output.startswith("Successful login.\nSuccessful") is False:
sys.exit("ERROR with" + email +
" ouput: " + output +
" errorMessage: " + errors)
return
def load_donors(outputDonorList):
donorList = []
if os.stat(outputDonorList).st_size > 0:
donorList = pd.read_csv(outputDonorList,
header=None,
usecols=[0, 1],
names=["userID", "name"],
low_memory=False)
return donorList
def get_json_data(email, password, userid, outputFilePathName, startDate, endDate):
url1 = "https://api.tidepool.org/auth/login"
myResponse = requests.post(url1, auth=(email, password))
if(myResponse.ok):
xtoken = myResponse.headers["x-tidepool-session-token"]
url2 = "https://api.tidepool.org/data/" + userid + \
"?endDate=" + endDate.strftime("%Y-%m-%d") + \
"T23:59:59.000Z&startDate=" + \
startDate.strftime("%Y-%m-%d") + "T00:00:00.000Z"
headers = {
"x-tidepool-session-token": xtoken,
"Content-Type": "application/json"
}
myResponse2 = requests.get(url2, headers=headers)
if(myResponse2.ok):
usersData = json.loads(myResponse2.content.decode())
with open(outputFilePathName, "w") as outfile:
json.dump(usersData, outfile)
if args.verboseOutput == True:
print("successfully downloaded to " + outputFilePathName)
else:
print("ERROR", myResponse2.status_code)
else:
print("ERROR", myResponse.status_code)
myResponse2 = np.nan
return myResponse, myResponse2
# %% START OF CODE
# get the list of donors if it doesn't already exist
outputDonorList = os.path.abspath(os.path.join(args.outputPath, "PHI-study-participants.csv"))
if not os.path.exists(outputDonorList):
get_donor_lists(os.environ["TEMP_EMAIL"], os.environ["TEMP_PASSWORD"], outputDonorList)
# load in the donor list
studyPartipants = load_donors(outputDonorList)
# deal with a specific use case called telet1d
if args.accountAlias in ["TELET1D"]:
studyPartipants = studyPartipants[studyPartipants["name"] !=
"<NAME>"].sort_values("name").reset_index(drop=True)
studyPartipants.to_csv(outputDonorList, index_label="dIndex")
else:
studyPartipants = pd.read_csv(outputDonorList, index_col="dIndex", low_memory=False)
for dIndex in studyPartipants.index:
userID = studyPartipants.userID[dIndex]
studyID = studyPartipants["name"][dIndex]
metaData.loc[dIndex, ["userID", "studyID"]] = userID, studyID
outputFileLocation = os.path.join(jsonDataPath, "PHI-" + userID + ".json")
startDate = pd.to_datetime(reportDate) - pd.Timedelta(2, unit="D")
endDate = pd.to_datetime(reportDate) + pd.Timedelta(1, unit="D")
    response1, response2 = get_json_data(os.environ["TEMP_EMAIL"], os.environ["TEMP_PASSWORD"],
                                         userID, outputFileLocation, startDate, endDate)
    metaData.loc[dIndex, ["getData.response1", "getData.response2"]] = \
        response1.status_code, response2.status_code
# load json data
data = pd.read_json(outputFileLocation)
if "type" in list(data):
if "cbg" in data.type.unique():
# calculate stats
cgmData = data[data.type == "cbg"].copy()
cgmData["utcTime"] = pd.to_datetime(cgmData.time, utc=True)
# get data from 6am to 6am
if (("timezone" in list(data)) | ("timezoneOffset" in list(data))):
if "timezone" in list(data):
userTz = data.timezone.describe()["top"]
tzo = get_timeZoneOffset(reportDate, userTz)
tz = timezone(userTz)
start6amDate = tz.localize(pd.to_datetime(reportDate)
- pd.Timedelta(1, unit="D")
+ pd.Timedelta(5, unit="h")
+ pd.Timedelta(57, unit="m")
+ pd.Timedelta(30, unit="s"))
end6amDate = tz.localize(pd.to_datetime(reportDate)
+ pd.Timedelta(5, unit="h")
+ pd.Timedelta(57, unit="m")
+ pd.Timedelta(30, unit="s"))
cgm = cgmData.loc[((cgmData.utcTime > start6amDate) &
(cgmData.utcTime < end6amDate)), ["time", "value"]]
else: # if there is no timezone given, then infer from timezone offset
tzo = data.timezoneOffset.median()
start6amDate = (pd.to_datetime(reportDate)
- pd.Timedelta(1, unit="D")
+ pd.Timedelta(5, unit="h")
+ pd.Timedelta(57, unit="m")
+ pd.Timedelta(30, unit="s")
- pd.Timedelta(tzo, unit="m"))
end6amDate = (pd.to_datetime(reportDate)
+ pd.Timedelta(5, unit="h")
+ pd.Timedelta(57, unit="m")
+ pd.Timedelta(30, unit="s")
- pd.Timedelta(tzo, unit="m"))
cgm = cgmData.loc[((pd.to_datetime(cgmData.time) > start6amDate) &
(pd.to_datetime(cgmData.time) < end6amDate)), ["time", "value"]]
cgm = cgm.rename(columns={"value": "mmol_L"})
cgm["mg_dL"] = (cgm["mmol_L"] * 18.01559).astype(int)
# round time to the nearest 5 minutes
cgm = td.round_time(cgm)
# drop any duplicates
cgm, nDuplicatesRemoved = td.remove_duplicates(cgm, cgm["roundedTime"])
metaData.loc[dIndex, ["nDuplicatesRemoved"]] = nDuplicatesRemoved
cgm["localTime"] = cgm["roundedTime"] + pd.to_timedelta(tzo, unit="m")
if len(cgm) > 1:
stats = get_stats(cgm)
# save raw data
cgm = cgm.sort_values("localTime").reset_index(drop=True)
cgm = cgm.rename(columns={"localTime": "roundedLocalTime"})
cgm = cgm[["time", "roundedLocalTime", "mmol_L", "mg_dL"]]
cgm.to_csv(os.path.join(indvidualDataFolder,
reportDate + "-cgm-data-for-" + studyID + ".csv"))
else:
stats = pd.DataFrame(index=[dIndex])
stats["incompleteDataset"] = "no cgm data"
else:
stats = pd.DataFrame(index=[dIndex])
stats["incompleteDataset"] = "no timezone information"
else:
stats = pd.DataFrame(index=[dIndex])
stats["incompleteDataset"] = "no data"
else:
stats = pd.DataFrame(index=[dIndex])
stats["incompleteDataset"] = "no data"
stats["studyID"] = studyID
    allStats = pd.concat([allStats, stats], ignore_index=True, sort=False)
# -*- coding: utf-8 -*-
import datetime as dt, IPython, pandas as pd, pyarrow as pa, pytest, requests, unittest
from builtins import object
from common import NoAuthTestCase
import graphistry
from mock import patch
triangleEdges = pd.DataFrame({'src': ['a', 'b', 'c'], 'dst': ['b', 'c', 'a']})
triangleNodes = pd.DataFrame({'id': ['a', 'b', 'c'], 'a1': [1, 2, 3], 'a2': ['red', 'blue', 'green']})
triangleNodesRich = pd.DataFrame({
'id': ['a', 'b', 'c'],
'a1': [1, 2, 3],
'a2': ['red', 'blue', 'green'],
'a3': [True, False, False],
'a4': [0.5, 1.5, 1000.3],
'a5': [dt.datetime.fromtimestamp(x) for x in [1440643875, 1440644191, 1440645638]],
'a6': [u'æski ēˈmōjē', u'😋', 's']
})
squareEvil = pd.DataFrame({
'src': [0,1,2,3],
'dst': [1,2,3,0],
'colors': [1, 1, 2, 2],
'list_int': [ [1], [2, 3], [4], []],
'list_str': [ ['x'], ['1', '2'], ['y'], []],
'list_bool': [ [True], [True, False], [False], []],
'list_date_str': [ ['2018-01-01 00:00:00'], ['2018-01-02 00:00:00', '2018-01-03 00:00:00'], ['2018-01-05 00:00:00'], []],
'list_date': [ [pd.Timestamp('2018-01-05')], [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')], [], []],
'list_mixed': [ [1], ['1', '2'], [False, None], []],
'bool': [True, False, True, True],
'char': ['a', 'b', 'c', 'd'],
'str': ['a', 'b', 'c', 'd'],
'ustr': [u'a', u'b', u'c', u'd'],
'emoji': ['😋', '😋😋', '😋', '😋'],
'int': [0, 1, 2, 3],
'num': [0.5, 1.5, 2.5, 3.5],
'date_str': ['2018-01-01 00:00:00', '2018-01-02 00:00:00', '2018-01-03 00:00:00', '2018-01-05 00:00:00'],
## API 1 BUG: Try with https://github.com/graphistry/pygraphistry/pull/126
'date': [dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1), dt.datetime(2018, 1, 1)],
'time': [pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05'), pd.Timestamp('2018-01-05')],
## API 2 BUG: Need timedelta in https://github.com/graphistry/pygraphistry/blob/master/graphistry/vgraph.py#L108
'delta': [pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day'), pd.Timedelta('1 day')]
})
for c in squareEvil.columns:
try:
squareEvil[c + '_cat'] = squareEvil[c].astype('category')
except:
# lists aren't categorical
#print('could not make categorical', c)
        pass
class Fake_Response(object):
def raise_for_status(self):
pass
def json(self):
        return {'success': True, 'dataset': 'fakedatasetname', 'viztoken': 'faketoken'}
def assertFrameEqual(df1, df2, **kwds ):
""" Assert that two dataframes are equal, ignoring ordering of columns"""
    from pandas.testing import assert_frame_equal
return assert_frame_equal(df1.sort_index(axis=1), df2.sort_index(axis=1), check_names=True, **kwds)
@patch('webbrowser.open')
@patch.object(graphistry.util, 'warn')
@patch.object(graphistry.pygraphistry.PyGraphistry, '_etl1')
class TestPlotterBindings_API_1(NoAuthTestCase):
@classmethod
def setUpClass(cls):
graphistry.pygraphistry.PyGraphistry._is_authenticated = True
graphistry.register(api=1)
def test_no_src_dst(self, mock_etl, mock_warn, mock_open):
with self.assertRaises(ValueError):
graphistry.bind().plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(source='src').plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(destination='dst').plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(source='doesnotexist', destination='dst').plot(triangleEdges)
def test_no_nodeid(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
with self.assertRaises(ValueError):
plotter.plot(triangleEdges, triangleNodes)
def test_triangle_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', edge_title='src')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_nodes(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='a2')
plotter.plot(triangleEdges, triangleNodes)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_nodes_rich(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='a2')
plotter.plot(triangleEdges, triangleNodesRich)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_edges_rich_2(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
plotter.plot(squareEvil)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_unknown_col_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', edge_title='doesnotexist')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertTrue(mock_warn.called)
def test_unknown_col_nodes(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='doesnotexist')
plotter.plot(triangleEdges, triangleNodes)
self.assertTrue(mock_etl.called)
self.assertTrue(mock_warn.called)
@patch.object(graphistry.util, 'error')
def test_empty_graph(self, mock_error, mock_etl, mock_warn, mock_open):
mock_error.side_effect = ValueError('error')
plotter = graphistry.bind(source='src', destination='dst')
with self.assertRaises(ValueError):
plotter.plot(pd.DataFrame([]))
self.assertFalse(mock_etl.called)
self.assertTrue(mock_error.called)
@patch('webbrowser.open')
@patch.object(graphistry.util, 'warn')
@patch.object(graphistry.pygraphistry.PyGraphistry, '_etl2')
class TestPlotterBindings_API_2(NoAuthTestCase):
@classmethod
def setUpClass(cls):
graphistry.pygraphistry.PyGraphistry._is_authenticated = True
graphistry.register(api=2)
def test_no_src_dst(self, mock_etl, mock_warn, mock_open):
with self.assertRaises(ValueError):
graphistry.bind().plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(source='src').plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(destination='dst').plot(triangleEdges)
with self.assertRaises(ValueError):
graphistry.bind(source='doesnotexist', destination='dst').plot(triangleEdges)
def test_no_nodeid(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
with self.assertRaises(ValueError):
plotter.plot(triangleEdges, triangleNodes)
def test_triangle_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', edge_title='src')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_nodes(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='a2')
plotter.plot(triangleEdges, triangleNodes)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_nodes_rich(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='a2')
plotter.plot(triangleEdges, triangleNodesRich)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_bind_edges_rich_2(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst')
plotter.plot(squareEvil)
self.assertTrue(mock_etl.called)
self.assertFalse(mock_warn.called)
def test_unknown_col_edges(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', edge_title='doesnotexist')
plotter.plot(triangleEdges)
self.assertTrue(mock_etl.called)
self.assertTrue(mock_warn.called)
def test_unknown_col_nodes(self, mock_etl, mock_warn, mock_open):
plotter = graphistry.bind(source='src', destination='dst', node='id', point_title='doesnotexist')
plotter.plot(triangleEdges, triangleNodes)
self.assertTrue(mock_etl.called)
self.assertTrue(mock_warn.called)
@patch.object(graphistry.util, 'error')
def test_empty_graph(self, mock_error, mock_etl, mock_warn, mock_open):
mock_error.side_effect = ValueError('error')
plotter = graphistry.bind(source='src', destination='dst')
with self.assertRaises(ValueError):
plotter.plot(pd.DataFrame([]))
self.assertFalse(mock_etl.called)
self.assertTrue(mock_error.called)
@patch('webbrowser.open')
@patch('requests.post', return_value=Fake_Response())
class TestPlotterReturnValue(NoAuthTestCase):
def test_no_ipython(self, mock_post, mock_open):
url = graphistry.bind(source='src', destination='dst').plot(triangleEdges)
self.assertIn('fakedatasetname', url)
self.assertIn('faketoken', url)
self.assertTrue(mock_open.called)
self.assertTrue(mock_post.called)
@patch.object(graphistry.util, 'in_ipython', return_value=True)
def test_ipython(self, mock_util, mock_post, mock_open):
widget = graphistry.bind(source='src', destination='dst').plot(triangleEdges)
self.assertIsInstance(widget, IPython.core.display.HTML)
@patch('webbrowser.open')
@patch.object(graphistry.pygraphistry.PyGraphistry, '_etl2')
class TestPlotterCallChaining(NoAuthTestCase):
@classmethod
def setUpClass(cls):
graphistry.pygraphistry.PyGraphistry._is_authenticated = True
graphistry.register(api=2)
def test_bind_chain(self, mock_etl2, mock_open):
plotter0 = graphistry.bind(source='caca').bind(destination='dst', source='src')
plotter0.plot(triangleEdges)
self.assertTrue(mock_etl2.called)
def test_bind_edges_nodes(self, mock_etl2, mock_open):
plotter0 = graphistry.bind(source='src').bind(destination='dst')
plotter1 = plotter0.bind(node='id').bind(point_title='a2')
plotter1.edges(triangleEdges).nodes(triangleNodes).plot()
self.assertTrue(mock_etl2.called)
class TestPlotterConversions(NoAuthTestCase):
@pytest.mark.xfail(raises=ModuleNotFoundError)
def test_igraph2pandas(self):
import igraph
ig = igraph.Graph.Tree(4, 2)
ig.vs['vattrib'] = 0
ig.es['eattrib'] = 1
(e, n) = graphistry.bind(source='src', destination='dst').igraph2pandas(ig)
edges = pd.DataFrame({
'dst': {0: 1, 1: 2, 2: 3},
'src': {0: 0, 1: 0, 2: 1},
'eattrib': {0: 1, 1: 1, 2: 1}
})
nodes = pd.DataFrame({
'__nodeid__': {0: 0, 1: 1, 2: 2, 3: 3},
'vattrib': {0: 0, 1: 0, 2: 0, 3: 0}
})
assertFrameEqual(e, edges)
assertFrameEqual(n, nodes)
@pytest.mark.xfail(raises=ModuleNotFoundError)
def test_pandas2igraph(self):
plotter = graphistry.bind(source='src', destination='dst', node='id')
ig = plotter.pandas2igraph(triangleEdges)
(e, n) = plotter.igraph2pandas(ig)
assertFrameEqual(e, triangleEdges[['src', 'dst']])
assertFrameEqual(n, triangleNodes[['id']])
@pytest.mark.xfail(raises=ModuleNotFoundError)
def test_networkx2igraph(self):
import networkx as nx
ng = nx.complete_graph(3)
        [major, minor] = [int(v) for v in nx.__version__.split('.')[:2]]
        if major == 1:
nx.set_node_attributes(ng, 'vattrib', 0)
nx.set_edge_attributes(ng, 'eattrib', 1)
else:
nx.set_node_attributes(ng, 0, 'vattrib')
nx.set_edge_attributes(ng, 1, 'eattrib')
(e, n) = graphistry.bind(source='src', destination='dst').networkx2pandas(ng)
edges = pd.DataFrame({
'dst': {0: 1, 1: 2, 2: 2},
'src': {0: 0, 1: 0, 2: 1},
'eattrib': {0: 1, 1: 1, 2: 1}
})
nodes = pd.DataFrame({
'__nodeid__': {0: 0, 1: 1, 2: 2},
'vattrib': {0: 0, 1: 0, 2: 0}
})
assertFrameEqual(e, edges)
assertFrameEqual(n, nodes)
class TestPlotterNameBindings(NoAuthTestCase):
def test_bind_name(self):
plotter = graphistry.bind().name('n')
assert plotter._name == 'n'
def test_bind_description(self):
plotter = graphistry.bind().description('d')
assert plotter._description == 'd'
class TestPlotterPandasConversions(NoAuthTestCase):
def test_table_to_pandas_from_none(self):
plotter = graphistry.bind()
assert plotter._table_to_pandas(None) is None
def test_table_to_pandas_from_pandas(self):
plotter = graphistry.bind()
df = pd.DataFrame({'x': []})
assert isinstance(plotter._table_to_pandas(df), pd.DataFrame)
def test_table_to_pandas_from_arrow(self):
plotter = graphistry.bind()
df = pd.DataFrame({'x': []})
arr = pa.Table.from_pandas(df)
assert isinstance(plotter._table_to_pandas(arr), pd.DataFrame)
class TestPlotterArrowConversions(NoAuthTestCase):
@classmethod
def setUpClass(cls):
graphistry.pygraphistry.PyGraphistry._is_authenticated = True
graphistry.pygraphistry.PyGraphistry.store_token_creds_in_memory(True)
graphistry.pygraphistry.PyGraphistry.relogin = lambda: True
graphistry.register(api=3)
def test_table_to_arrow_from_none(self):
plotter = graphistry.bind()
assert plotter._table_to_arrow(None) is None
def test_table_to_arrow_from_pandas(self):
plotter = graphistry.bind()
df = pd.DataFrame({'x': []})
assert isinstance(plotter._table_to_arrow(df), pa.Table)
def test_table_to_arrow_from_arrow(self):
plotter = graphistry.bind()
df = pd.DataFrame({'x': []})
arr = pa.Table.from_pandas(df)
assert isinstance(plotter._table_to_arrow(arr), pa.Table)
def test_api3_plot_from_pandas(self):
        g = graphistry.edges(pd.DataFrame({'s': [0], 'd': [0]}))
from caes import ICAES2
import pandas as pd
from joblib import Parallel, delayed, parallel_backend
import time
import os
from datetime import datetime
# =====================
# function to enable sensitivity analysis
# =====================
def sizing_and_sensitivity(wrkdir, xlsx_filename, sheet_name, capacity, duration, polytropic_index, float_perm,
int_perm, debug):
# create folder to store results
result_dir = os.path.join(wrkdir, sheet_name)
try:
os.stat(result_dir)
except:
os.mkdir(result_dir)
# -----------------------------
# prepare for sizing
# -----------------------------
entries = ['depth_m', 'thickness_m', 'porosity', 'capacity_MW', 'duration_hr', 'permeability_mD', 'n_cmp1',
'n_exp1']
user_input = pd.read_excel(xlsx_filename, sheet_name=sheet_name)
user_input = user_input.set_index('Variable')
s = pd.Series(index=entries)
s['sheet_name'] = sheet_name
s['depth_m'] = user_input.loc['depth', 'Baseline']
s['thickness_m'] = user_input.loc['h', 'Baseline']
s['porosity'] = user_input.loc['phi', 'Baseline']
s['capacity_MW'] = capacity
s['duration_hr'] = duration
s['permeability_mD'] = user_input.loc['k', 'Baseline']
s['r_w'] = user_input.loc['r_w', 'Baseline']
s['n_cmp1'] = polytropic_index
s['n_exp1'] = polytropic_index
# ------------------
# run sizing
# ------------------
sized_result = sizing(s, debug=False)
# save inputs
os.chdir(result_dir)
sized_result.to_csv('sizing_results.csv')
# ------------------
# prepare for sensitivity
# ------------------
os.chdir(wrkdir)
user_input = pd.read_excel(xlsx_filename, sheet_name=sheet_name)
os.chdir(result_dir)
# use results from sizing
m_dot = pd.Series()
m_dot['Variable'] = 'm_dot'
m_dot['Baseline'] = sized_result['m_dot']
m_dot['Include'] = 'Y'
m_dot['Type'] = 'float'
m_dot['Note'] = 'from sizing'
    user_input = pd.concat([user_input, m_dot.to_frame().T], ignore_index=True)
    r_f = pd.Series()
"""
UTZappos processing.
"""
import os
import pandas as pd
from collections import Counter
import torch
def parse_split(root, split):
def parse_pairs(pair_list):
with open(pair_list, 'r') as f:
pairs = f.read().strip().split('\n')
pairs = [t.split() for t in pairs]
pairs = list(map(tuple, pairs))
attrs, objs = zip(*pairs)
return attrs, objs, pairs
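    # Each line of the *_pairs.txt files is assumed to hold one whitespace-separated
    # "<attr> <obj>" pair (e.g. "Leather Boots.Ankle"), which parse_pairs unpacks into tuples.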
tr_attrs, tr_objs, tr_pairs = parse_pairs(os.path.join(root, split, "train_pairs.txt"))
vl_attrs, vl_objs, vl_pairs = parse_pairs(os.path.join(root, split, "val_pairs.txt"))
ts_attrs, ts_objs, ts_pairs = parse_pairs(os.path.join(root, split, "test_pairs.txt"))
all_attrs = sorted(list(set(tr_attrs + vl_attrs + ts_attrs)))
all_objs = sorted(list(set(tr_objs + vl_objs + ts_objs)))
all_pairs = sorted(list(set(tr_pairs + vl_pairs + ts_pairs)))
return all_attrs, all_objs, all_pairs, tr_pairs, vl_pairs, ts_pairs
def get_split_info(root, split, pairs):
data = torch.load(os.path.join(root, f'metadata_{split}.t7'))
train_data, val_data, test_data = [], [], []
for instance in data:
image, attr, obj, settype = instance['image'], instance['attr'], instance['obj'], instance['set']
if attr == 'NA' or (attr, obj) not in pairs or settype == 'NA':
# ignore instances with unlabeled attributes or that are not in the current split
continue
data_i = [image, attr, obj]
if settype == 'train':
train_data.append(data_i)
elif settype == 'val':
val_data.append(data_i)
else:
test_data.append(data_i)
return train_data, val_data, test_data
def split_obj(obj_name):
if "." in obj_name:
cat, *subcat = obj_name.split(".")
return cat, ".".join(subcat)
return obj_name, None
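# Illustrative behaviour: split_obj("Boots.Ankle") -> ("Boots", "Ankle"); split_obj("Sandals") -> ("Sandals", None).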
def main():
root = "/Users/jacobkelly/Downloads/compositional_split_natural/ut-zap50k"
split = "compositional-split-natural"
_, _, pairs, *_ = parse_split(root, split)
trn, val, tst = get_split_info(root, split, pairs)
trn_df = pd.DataFrame.from_dict(trn)
trn_df.columns = ["image", "attr", "obj"]
    val_df = pd.DataFrame.from_dict(val)
import os
import pandas
import numpy as np
import warnings
from . import io
__all__ =["get_target_lightcurve"]
ZTFCOLOR = { # ZTF
"p48r":dict(marker="o",ms=7, mfc="C3"),
"p48g":dict(marker="o",ms=7, mfc="C2"),
"p48i":dict(marker="o",ms=7, mfc="C1")
}
BAD_ZTFCOLOR = { # ZTF
"p48r":dict(marker="o",ms=6, mfc="None", mec="C3"),
"p48g":dict(marker="o",ms=6, mfc="None", mec="C2"),
"p48i":dict(marker="o",ms=6, mfc="None", mec="C1")
}
def get_target_lightcurve(name, load_salt2param=True, **kwargs):
""" """
return LightCurve.from_name(name, load_salt2param=load_salt2param, **kwargs)
# ================== #
# #
# LIGHTCURVES #
# #
# ================== #
class LightCurve( object ):
""" """
def __init__(self, data, meta=None, salt2param=None, use_dask=False):
""" """
self.set_data(data)
self.set_meta(meta)
self.set_salt2param(salt2param)
self._use_dask = use_dask
@classmethod
def from_filename(cls, filename, use_dask=False):
""" """
if use_dask:
from dask import delayed
# This is faster than dd.read_cvs and does what we need here
lc = delayed(pandas.read_csv)(filename, delim_whitespace=True, comment='#')
else:
lc = pandas.read_csv(filename, delim_whitespace=True, comment='#')
meta = pandas.Series([os.path.basename(filename).split("_")[0]],
index=["name"])
return cls(lc, meta=meta, use_dask=use_dask)
@classmethod
def from_name(cls, targetname, use_dask=False, load_salt2param=True, **kwargs):
""" """
filename = io.get_target_lc(targetname)
this = cls.from_filename(filename, use_dask=use_dask, **kwargs)
this._targetname = targetname
if load_salt2param:
this.load_salt2param()
return this
# ================ #
# Method #
# ================ #
# --------- #
# LOADER #
# --------- #
def load_salt2param(self):
""" """
targetname = self.targetname
if targetname is None:
warnings.warn("Unknown targetname (=None) ; use manually set_salt2param()")
return None
from .salt2 import get_target_salt2param
salt2param = get_target_salt2param(targetname)
self.set_salt2param(salt2param)
# --------- #
# SETTER #
# --------- #
def set_data(self, data):
""" """
self._data = data
def set_salt2param(self, salt2param):
""" """
self._salt2param = salt2param
def set_meta(self, meta):
""" """
self._meta = meta
# --------- #
# GETTER #
# --------- #
def get_obsphase(self, min_detection=5, groupby=None, **kwargs):
"""
Returns
-------
pandas.Series
"""
lcdata = self.get_lcdata(min_detection=min_detection, **kwargs)
if groupby is None:
return lcdata["phase"]
return lcdata.groupby(groupby)["phase"].apply( list )
def get_saltmodel(self):
""" """
from .salt2 import get_saltmodel
return get_saltmodel(**self.salt2param.rename({"redshift":"z"}
)[["z","t0","x0","x1","c",
"mwebv"]].to_dict()
)
def get_lcdata(self, zp=None, in_mjdrange=None,
min_detection=None,
filters=None,
flagout=[1,2,4,8,256]):
"""
filters: [string, None or list]
list of filters
- None/'*' or 'all': no filter selection/
- string: just this filter (e.g. 'p48g')
- list of string: just these filters (e.g. ['p48g','p48r'])
flagout: [list of int or string]
flag == 0 means all good, but in details:
0: no warning
            1: flux_err==0: Remove unphysical errors
            2: chi2dof>3: Remove extreme outliers
            4: cloudy>1: BTS cut
            8: infobits>0: BTS cut
            16: mag_lim<19.3: Cut applied in Dhawan 2021
32: seeing>3: Cut applied in Dhawan 2021
64: fieldid>879: Recommended IPAC cut
128: moonilf>0.5: Recommended IPAC cut
256: has_baseline>1: Has a valid baseline correction
512: airmass>2: Recommended IPAC cut
1024: flux/flux_err>=5: Nominal detection
"""
if flagout in ["all","any","*"]:
data = self.data[self.data["flag"]==0]
if flagout in ["all","any","*"]:
data = self.data[self.data["flag"]==0]
elif flagout is None:
data = self.data.copy()
else:
flag_ = np.all([(self.data.flag&i_==0) for i_ in np.atleast_1d(flagout)], axis=0)
data = self.data[flag_]
if zp is None:
zp = data["ZP"].values
coef = 1.
else:
coef = 10 ** (-(data["ZP"].values - zp) / 2.5)
flux = data["flux"] * coef
error = data["flux_err"] * coef
detection = flux/error
lcdata = data[["mjd","mag","mag_err","filter","field_id","x_pos","y_pos", "flag","mag_lim"]]
additional = pandas.DataFrame(np.asarray([zp, flux,error, detection]).T,
columns=["zp", "flux", "error", "detection"],
index=lcdata.index)
lcdata = pandas.merge(lcdata, additional, left_index=True, right_index=True)
# lcdata.loc["zp",:] = zp
# lcdata["flux"] = flux
# lcdata["error"] = error
# lcdata["detection"] = detection
lcdata["filter"] = lcdata["filter"].replace("ztfg","p48g").replace("ztfr","p48r").replace("ztfi","p48i")
if self.has_salt2param():
lcdata["phase"] = lcdata["mjd"]-self.salt2param['t0']
else:
lcdata["phase"] = np.NaN
if in_mjdrange is not None:
lcdata = lcdata[lcdata["mjd"].between(*in_mjdrange)]
if min_detection is not None:
lcdata = lcdata[lcdata["detection"]>min_detection]
if filters is not None and filters not in ["*","all"]:
lcdata = lcdata[lcdata["filter"].isin(np.atleast_1d(filters))]
return lcdata
def get_sncosmotable(self, min_detection=5, phase_range=[-10,30], filters=["p48r","p48g"], **kwargs):
""" """
from .utils import mag_to_flux
t0 = self.salt2param["t0"]
to_fit = self.get_lcdata(min_detection=min_detection, in_mjdrange= t0 + phase_range,
filters=filters, **kwargs)
sncosmo_lc = to_fit.rename({"mjd":"time", "filter":"band"}, axis=1)[["time","band","zp","mag","mag_err"]]
sncosmo_lc["zpsys"] = "ab"
sncosmo_lc["flux"], sncosmo_lc["flux_err"] = mag_to_flux(sncosmo_lc["mag"],
sncosmo_lc["mag_err"],
zp=sncosmo_lc["zp"])
return sncosmo_lc
def fit_salt(self, free_parameters=['t0', 'x0', 'x1', 'c'],
min_detection=5, phase_range=[-10,30], filters=["p48r","p48g"],
as_dataframe=False,
**kwargs):
""" """
import sncosmo
from astropy import table
from .salt2 import salt2result_to_dataframe
model = self.get_saltmodel()
sncosmo_df = self.get_sncosmotable(min_detection=min_detection,
phase_range=phase_range,
filters=filters)
fitted_data = table.Table.from_pandas(sncosmo_df)
(result, fitted_model) = sncosmo.fit_lc(fitted_data, model,
vparam_names=free_parameters, **kwargs)
if as_dataframe:
return salt2result_to_dataframe(result)
return (fitted_data,model), (result, fitted_model)
def fit_salt_perfilter(lc, filters=["p48g",'p48r','p48i'],
min_detection=5, free_parameters=['t0','x0', 'x1'],
phase_range=[-10, 30], t0_range=[-2,+2]):
""" """
results = []
for filter_ in filters:
try:
result = lc.fit_salt(min_detection=min_detection,
filters=filter_,
free_parameters=free_parameters,
phase_range=phase_range,
bounds={"t0":lc.salt2param['t0']+t0_range},
as_dataframe=True
)
except:
warnings.warn(f"failed for filter {filter_}")
result = pandas.DataFrame()
results.append(result)
        return pandas.concat(results, keys=filters)
from tkinter import *
from random import choice
import pandas
BACKGROUND_COLOR = "#B1DDC6"
to_learn = {}
word = {}
# ---------------------------- PANDAS LOGIC ------------------------------- #
try:
    data = pandas.read_csv("./data/words_to_learn.csv")
from os import times
import locale
from datetime import date, datetime, timedelta, timezone
import time
import requests
from tqdm import tqdm
import pandas as pd
import urllib.request
import os
locale.setlocale(locale.LC_ALL, "it_IT.UTF-8")
DAYNAMES = {
0: "lunedi",
1: "martedi",
2: "mercoledi",
3: "giovedi",
4: "venerdi",
5: "sabato",
6: "domenica",
}
from bs4 import BeautifulSoup
initial_date = date(2011, 4, 23)
today = date.today()
num_days = (today - initial_date).days
print("Num days", num_days)
import re
# dd/mm/YY
results = list()
total = 0
for i in tqdm(range(num_days), desc="Days"):
try:
day = today - timedelta(days=i)
day_str = day.strftime("%y%m%d")
d1 = day.strftime("%Y/%m/%d")
month = day.strftime("%B").lower()
weekday = DAYNAMES[day.weekday()]
url = f"https://www.ilpost.it/{d1}/{weekday}-{day.day}-{month}/"
# print(url)
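        # Illustrative example: for 2021-04-23 (a Friday) this builds
        # https://www.ilpost.it/2021/04/23/venerdi-23-aprile/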
req = requests.get(url)
soup = BeautifulSoup(req.text, "lxml")
body = soup.find_all("div", attrs={"id": "singleBody"})[0]
def has_url(href):
return href and href.startswith(url)
links = body.find_all(href=has_url)
hrefs = [l["href"] for l in links]
for idx, href in tqdm(enumerate(hrefs), desc="imgs", leave=False):
try:
req = requests.get(href)
soup = BeautifulSoup(req.text, "lxml")
gallery = soup.find("img", attrs={"class": "photo"})
img_url = gallery["data-src"]
if img_url is None:
continue
caption = soup.find(
"span", attrs={"id": "gallery-description"}
).text.strip()
caption = caption.replace("\n", "")
# filename
ext = os.path.splitext(os.path.basename(img_url))[-1]
filename = f"ILPOST_{day_str}_{idx:04d}{ext}"
image_dest_path = f"ILPOST_IT/{filename}.jpg"
if not os.path.exists(image_dest_path):
opener = urllib.request.build_opener()
opener.addheaders = [
("User-Agent", "Googlebot-Image/1.0"),
("X-Forwarded-For", "172.16.31.10"),
]
urllib.request.install_opener(opener)
try:
urllib.request.urlretrieve(img_url, image_dest_path)
except:
print("Download Image as failed")
filename = "failed"
else:
print(f"Skipping file {filename}")
results.append(
{"date": day, "url": img_url, "caption": caption, "img": image_dest_path}
)
except Exception as e:
print("Image at", href, "has failed")
total += len(hrefs)
except:
print("Day", day, "has failed")
if (i + 1) % 500 == 0:
df = pd.DataFrame(results)
df.to_csv(f"ilpost_fotodelgiorno_p{i}.tsv", sep="\t", index=None)
time.sleep(1)
df = pd.DataFrame(results)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 08:02:39 2020
@author: <NAME>
"""
#Standard packages
import os
import numpy as np
import pandas as pd
#Sklearning package
from sklearn.preprocessing import MinMaxScaler
#Graphics packages
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
#Project packages
import config
from utils import classes_names
from table import best_results
#Still beta, several updates required!
#Best model path:
best_path = os.path.join('results', 'mccv', 'baseline_over_SVC_linear_100.0',
'detailed_score.csv')
mccv_path = config._get_path('mccv')
graphics_path = config._get_path('graphics')
def gs_heatmap(output_name = 'gs_table'):
df, _ = best_results()
c_map = plt.get_cmap('YlGnBu')
c_map = ListedColormap(c_map(np.linspace(0.1, 0.7, 256)))
fig, ax = plt.subplots(figsize=(12, 7))
heat = sns.heatmap(df, annot=True, linewidths= 1,
cmap=c_map, ax = ax, fmt='.4f')
#Ad-hoc
for text in heat.texts:
txt = text.get_text()
n = float(txt)
if(n == 0 or n ==1 ): text.set_text('Yes' if n else 'No')
ax.set_title('Grid Search')
fig.savefig(os.path.join(graphics_path, output_name + '.png'),
dpi = 1200,
bbox_inches = "tight")
return df
def total_score_plot_all():
_total_score_plot(mccv_files(), "Best models")
def _total_score_plot(name_list, main_name):
df_tuples = []
for name in name_list:
df = pd.read_csv(os.path.join(mccv_path, name, 'total_score.csv'))
std = np.std(df[df.columns[1]])
mean = np.mean(df[df.columns[1]])
#label1 = name
label1 = name + ' loss: ' + str(round(mean, 5)) + \
', std: ' + str(round(std, 5))
std = np.std(df[df.columns[3]])
mean = np.mean(df[df.columns[3]])
#label2 = name
label2 = name + ' score: ' + str(round(mean, 5)) + \
', std: ' + str(round(std, 5))
df_tuples.append((df, label1, label2))
total_score_plot(df_tuples, main_name)
def total_score_plot(df_tuples, name):
sns.set_palette(sns.color_palette("hls", len(df_tuples)))
for tup in df_tuples:
plot = sns.distplot(tup[0]["Cross_Entropy_val"],
axlabel = 'Cross Entropy (validation)',
label=tup[1],
)
plt.legend(loc="center", bbox_to_anchor=(0.5, -0.35))
fig = plot.get_figure()
fig.savefig(os.path.join(graphics_path, name + '_cross_entropy.png'),
dpi = 1200,
bbox_inches = "tight")
plt.show()
plt.close()
##The same for accuracy
sns.set_palette(sns.color_palette("hls", len(df_tuples)))
for tup in df_tuples:
plot = sns.distplot(tup[0]["Accuracy_val"],
axlabel = 'Accuracy (validation)',
label=tup[2])
plt.legend(loc="center", bbox_to_anchor=(0.5, -0.35))
fig = plot.get_figure()
fig.savefig(os.path.join(graphics_path, name + '_accuracy.png'),
dpi = 1200,
bbox_inches = "tight")
plt.show()
plt.close()
def self_heatmap():
df = pd.read_csv(os.path.join('results', 'SelfTraining.csv'), index_col=0)
df.index.name = None
df.drop(['base_path'], axis=1, inplace=True)
rename = {'time' : 'Time (s)',
'amount_labaled' : 'Samples labeled',
'accuracy' : 'Accuracy',
'log_loss' : 'Log-los',
'std_log_loss' : 'log-los (std)'}
df.rename(columns = rename, inplace=True)
scaler = MinMaxScaler()
df_dual = pd.DataFrame(data = scaler.fit_transform(df),
columns = df.columns,
index = df.index)
heat0 = sns.heatmap(df, annot=True, linewidths= 1, fmt='.3f')
fig, ax = plt.subplots(figsize=(12, 5))
color_map = plt.get_cmap('YlGnBu')
color_map = ListedColormap(color_map(np.linspace(0.1, 0.75, 256)))
heat = sns.heatmap(df_dual, annot=True, linewidths= 1,
cmap= color_map, ax = ax, fmt='.3f')
colorbar = ax.collections[0].colorbar
colorbar.set_ticks([0.1, 0.5, 1])
colorbar.set_ticklabels(['Low', 'Middle', 'High'])
for t in range(len(heat0.texts)):
txt = heat0.texts[t].get_text()
heat.texts[t].set_text(txt)
ax.set_title('SelfTraining Table (5-fold cross validation)')
fig.savefig(os.path.join(graphics_path, 'SelfTraining_table.png'),
dpi = 1200,)
def best_model_results(model_name = 'baseline_over_SVC_linear_100.0'):
path = os.path.join(mccv_path, model_name)
probability_heatmap(pd.read_csv(os.path.join(path,'probability.csv')),
model_name)
cross_heatmap(pd.read_csv(os.path.join(path,'cross_matrix.csv')),
model_name)
detailed_score_heatmap(pd.read_csv(os.path.join(path,
'detailed_score.csv')),
model_name)
def probability_heatmap(df, name):
names, classes = classes_names()
w = df.mean(axis=0).values.reshape(classes, classes) #ndarray
w = np.around(w, decimals=3)
prob_frame = pd.DataFrame(data = w, columns = names, index = names)
fig, ax = plt.subplots(figsize=(12, 7))
sns.heatmap(prob_frame, annot=True, linewidths= 1, cmap="YlGnBu", ax = ax)
ax.set_title('True class v.s. Predicted Probability Class')
fig.savefig(os.path.join(graphics_path, name + '_probability.png'),
dpi = 1200,
bbox_inches = "tight")
def cross_heatmap(df, name):
names, classes = classes_names()
w = df.mean(axis=0).values.reshape(classes, classes) #ndarray
for i in range(classes):
w[i] /= np.sum(w[i])
w = np.around(w, decimals=3)
cross_frame = pd.DataFrame(data = w, columns = names, index = names)
fig, ax = plt.subplots(figsize=(12, 7))
sns.heatmap(cross_frame, annot=True, linewidths= 1, cmap="YlGnBu", ax = ax)
ax.set_title('True class v.s. Predicted Class (mean)')
fig.savefig(os.path.join(graphics_path, name + '_cross_prediction.png'),
dpi = 1200,
bbox_inches = "tight")
def mccv_detailed_score_heatmap():
models = mccv_files()
for model_name in models:
df = pd.read_csv(os.path.join(mccv_path,model_name,
'detailed_score.csv'))
detailed_score_heatmap(df, model_name)
def detailed_score_heatmap(df, name):
names, classes = classes_names()
w = df.mean(axis=0).values.reshape(classes, 4)
w = np.around(w, decimals=3)
score_frame = pd.DataFrame(data = w,
columns=['sensitivity', 'specificity',
'precision', 'f1_score'],
index = names)
fig, ax = plt.subplots(figsize=(7, 7))
#color_map = plt.get_cmap('YlGnBu_r')
#color_map = ListedColormap(color_map(np.linspace(0.1, 0.6, 256)))
sns.heatmap(score_frame,
annot=True, linewidths= 0.05, cmap='YlGnBu', ax = ax)
ax.set_title(name + ' Scores')
fig.savefig(os.path.join(graphics_path, name + '_detailed_score.png'),
dpi = 1200,
bbox_inches = "tight")
def final_table():
names, classes = classes_names()
ked_et_al = {'Cellulose acetate': 0.97,
'Cellulose like': 0.65,
'Ethylene propylene rubber': 0.76,
'Morphotype 1': 0.89,
'Morphotype 2': 0.88,
'PEVA': 0.74,
'Poly(amide)': 1,
'Poly(ethylene)' : 1,
'Poly(ethylene) + fouling' : 0.88,
'Poly(ethylene) like' : 0.69,
'Poly(propylene)' : 0.99,
'Poly(propylene) like' : 0.51,
'Poly(styrene)' : 0.99,
'Unknown' : 0 }
w0 = []
for n in names:
w0.append(ked_et_al[n])
w0 = np.array(w0)
#Load model's sensitivity mccv data (using Kedzierski et. al methodology)
df1 = pd.read_csv(os.path.join('results',
'final_model_mccv_all_data_detailed_score.csv')
)
w1 = df1.mean(axis=0).values.reshape(classes, 4)
w1 = np.around(w1, decimals=3)[:, 0]
#Load MCCV results (best model)
    df2 = pd.read_csv(best_path)
import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def load_data():
here = os.path.realpath(__file__)
here = os.path.dirname(here)
fn = os.path.join(here, 'monthly-milk-production.csv')
return pd.read_csv(fn, index_col='Month')
def clean_data(df):
# convert index to a timeseries
    df.index = pd.to_datetime(df.index)
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 2021, last edited 27 Oct 2021
Fiber flow emissions calculations module - class version
Inputs:
Excel file with old PPI market & emissions data ('FiberModelAll_Python_v3-yields.xlsx')
Outputs:
Dict of keys 'old','new','forest','trade' with emissions calcs
(*testing inputs*
x = 'FiberModelAll_Python_v2.xlsx'
f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
pbpVolOld.columns = [x[:-2] for x in pbpVolOld.columns]
consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=34, nrows=3, index_col=0)
rLevel = pd.read_excel(x, 'Demand', usecols="F:K", skiprows=16, nrows=5)
rLevel = {t: list(rLevel[t][np.isfinite(rLevel[t])].values) for t in fProd}
fProd = [t for t in f2pVolOld.iloc[:,:6].columns]
fProdM = [t for t in f2pVolOld.iloc[:,:7].columns]
rFiber = f2pVolOld.index[:16]
vFiber = f2pVolOld.index[16:]
rPulp = [p for p in pbpVolOld.index if 'Rec' in p]
vPulp = [q for q in pbpVolOld.index if 'Vir' in q]
fPulp = [f for f in pbpVolOld.index]
import numpy as np
f2pYld = pd.read_excel(x, 'Fiber', usecols="I:O", skiprows=1, nrows=21)
f2pYld.index = np.concatenate([rFiber.values, vFiber.values], axis=0)
pulpYld = pd.read_excel(x, 'Pulp', usecols="D", skiprows=1, nrows=14)
pulpYld.index = rPulp + vPulp
transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
rsdlbio = rsdlbio.fillna(0)
rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
rsdlfos = rsdlfos.fillna(0)
woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
exportOld.iloc[:,:-1] = exportOld.iloc[:,:-1]
exportNew = exportOld.iloc[:,:-1] * 1.5
exportNew.columns = ['exportNew']
exportNew = exportNew.assign(TransCode=exportOld['TransCode'].values)
fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
)
@author: <NAME>
"""
import pandas as pd
import numpy as np
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
# f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by pulp name
# pbpVolNew (df) - pulp by product volume; indexed by pulp name
# consCollNew (df) - domestic consumption, collection, and recovery by product
# demandNew (df) - new demand by product; indexed by rec level
uC = 0.907185 # unit conversion of MM US ton to Mg/metric ton
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # has .1 after column names for pandas duplicate
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # has .1 after column names for some reason
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
cls.rsdlbio = cls.rsdlbio.fillna(0)
cls.rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
cls.rsdlfos = cls.rsdlfos.fillna(0)
cls.transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
cls.transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
cls.transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
cls.woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
cls.wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
cls.wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
cls.wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
cls.wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
cls.wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
cls.wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
cls.chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
cls.chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
cls.fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
def calculateTrans(cls,transVol):
# transVol [df] - item, volume (in Mg) by product, TransCode; indexed by fiberCode or other label
# transPct [df] - % traversed for transMode by transCode; indexed by transCode
# transKM [df] - distance traversed for transMode by transCode; indexed by transCode
# transUMI [s] - unit impact by mode (truck, train, boat); indexed by "transUMI"
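        # For each product t: impact[t] = sum over rows and transport modes of
        # volume (Mg) x mode share x distance (km) x unit impact per Mg-km (assumed units).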
transImpact = pd.Series(0, index = cls.fProd)
tC = transVol['TransCode']
tC = tC[(tC != 0) & (tC != 1)] # index non-zero/non-NaN elements only
transVol = transVol.loc[tC.index]
for t in cls.fProd:
for m in cls.transUMI.columns:
transImpact[t] += sum(transVol[t] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values * 1)
return transImpact
def calculateChem(cls,chemicals,prodDemand):
# chemicals [df] - nonfiber name, % use by product, transCode, impact factor; indexed by number
# prodDemand [df] - total demand; indexed by product
chemImpact = pd.Series(0, index = cls.fProd, name = 'chemImp')
chemVol = pd.DataFrame(0, index = chemicals.index, columns = cls.fProd)
for t in cls.fProd:
chemImpact[t] = sum(prodDemand[t].values * chemicals[t] * chemicals['Impact Factor'])
chemVol[t] = chemicals[t] * prodDemand[t].values
chemVol = chemVol.join(chemicals['TransCode'])
chemTrans = pd.Series(cls.calculateTrans(chemVol), name = 'chemTrans')
chemImpact = pd.DataFrame(chemImpact)
return pd.concat([chemImpact, chemTrans], axis=1)
def calculateEoL(cls,eolEmissions,consColl):
# eolEmissions [df] - biogenic and fossil CO2 emission factors & transportation code by product; indexed by bio/fosCO2
# consColl [df] - domestic consumption, collection, and recovery by product; indexed by name
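        # Landfill flows: product-to-landfill = domestic consumption - recovery volume,
        # and MRF-to-landfill = collection volume - recovery volume (both in Mg).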
prod2landfill = pd.Series(consColl.loc['Domestic Consumption'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'prod2landfill')
mrf2landfill = pd.Series(consColl.loc['Collection Volume'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'mrf2landfill')
bioEoL = pd.Series(prod2landfill * eolEmissions.loc['bioCO2'], index = cls.fProd, name = 'bioEoL')
mrf2landfill = pd.DataFrame(mrf2landfill) # works b/c all prods have same TransCode
transEoL = pd.Series(cls.calculateTrans(mrf2landfill.T.assign(TransCode=eolEmissions.loc['TransCode'].values[0])),
index = cls.fProd, name = 'eolTrans')
fesTransEoL = pd.Series(prod2landfill * eolEmissions.loc['fossilCO2'] + transEoL, index = cls.fProd,
name = 'fesTransEoL')
bftEoL = pd.Series(bioEoL + fesTransEoL, name = 'bftEoL')
return pd.concat([bioEoL, fesTransEoL, bftEoL, transEoL], axis=1)
def getEnergyYldCoeff(cls,f2pVol,pbpVol):
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
        # PYCoeff [s] - pulp yield coefficient; indexed by pulp
f2pByPulp = pd.Series(0, index = pbpVol.index, name = 'fiber2pulp')
for p in cls.rPulp:
f2pByPulp[p] = sum([f2pVol.loc[cls.rFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
f2pByPulp[q] = sum([f2pVol.loc[cls.vFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpProd = pd.Series([pbpVol.loc[i].sum() for i in pbpVol.index], index = pbpVol.index, name = 'pulpProd')
PYCoeff = (pd.Series(f2pByPulp / pulpProd, name = 'pulpYldCoeff'))
PYCoeff.replace([np.inf, -np.inf], np.nan, inplace=True)
PYCoeff = PYCoeff.fillna(0)
return PYCoeff
def getEnergyPulpPct(cls,pbpVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
pulpPct = pbpVol.copy().drop(['TransCode'], axis=1)
for t in pulpPct.columns:
rTotalPulp = pulpPct.loc[cls.rPulp,t].sum()
vTotalPulp = pulpPct.loc[cls.vPulp,t].sum()
pulpPct.loc[cls.rPulp,t] = pulpPct.loc[cls.rPulp,t] / rTotalPulp
pulpPct.loc[cls.vPulp,t] = pulpPct.loc[cls.vPulp,t] / vTotalPulp
return pulpPct.fillna(0)
def getEnergyMultiProd(cls,PYMult,pulpPct):
# PYMult [s] - pulp yield multiplier; indexed by pulp name
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
#
# (return) [df] - rec/vir yield multiprod by product; index by r/vYldMultiProd
rYldMultiProd = pd.Series([sum(pulpPct.loc[cls.rPulp,t] * PYMult[cls.rPulp]) for t in cls.fProd],
index = cls.fProd, name = 'rYldMultiProd')
vYldMultiProd = pd.Series([sum(pulpPct.loc[cls.vPulp,t] * PYMult[cls.vPulp]) for t in cls.fProd],
index = cls.fProd, name = 'vYldMultiProd')
rYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
vYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
return pd.concat([rYldMultiProd.fillna(0), vYldMultiProd.fillna(0)], axis=1)
def calculateEnergy(cls,pbpVol,prodLD,multiProd,pwpEI,paperEI):
# prodLD (df) - demand by product; indexed by % recycled content level
# bfEI (df) - bio & fes energy intensity fitting parameters by product; indexed by name
# bioPct (df) - bio fitting parameter for PWP; indexed by name
# pwpEI (df) - energy intensity of PWP pulp; indexed by pulp name
# paperEI (df) - paper production energy intensity; indexed by 'PPE'
# pbpVol (df) - pulp by product (in Mg); indexed by pulp name
# multiProd (df) - rec/vir yield multiprod by product; indexed by product
bioEnergy = pd.Series(0, index = cls.fProd, name = "bioEnergy")
fesEnergy = pd.Series(0, index = cls.fProd, name = 'fesEnergy')
totalEnergy = pd.Series(0, index = cls.fProd, name = 'totalEnergy')
for t in cls.fProd:
bioEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
sum([r * cls.bfEI.loc['bioEI b1',t] + cls.bfEI.loc['bioEI b0',t] for r in cls.rLevel[t]]))
fesEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
cls.bfEI.loc['fesEI',t] * multiProd.loc[t,'rYldMultiProd'])
if 'P&W' or 'News' in t:
avgrecPct = sum(prodLD[t].values[:len(cls.rLevel[t])] * cls.rLevel[t]) / prodLD[t].sum()
bioPctPW = avgrecPct * cls.bioPct.loc['bioPct b1',t] + cls.bioPct.loc['bioPct b0',t]
pulpProdEnergy = sum([pbpVol.loc[p,t] * pwpEI.loc[p].values[0] for p in pwpEI.index])
ppEnergy = pulpProdEnergy + prodLD[t].sum() * paperEI.values[0]
bioEnergy[t] = bioPctPW * ppEnergy
fesEnergy[t] = (1 - bioPctPW) * ppEnergy * multiProd.loc[t,'rYldMultiProd']
totalEnergy[t] = bioEnergy[t] + fesEnergy[t]
return pd.concat([bioEnergy, fesEnergy, totalEnergy], axis=1)
def calculateProduction(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# bfCO2 (df) - bio & fes CO2 fitting parameters; indexed by product
bioCO2 = pd.Series(0, index = cls.fProd, name = 'bioCO2')
fesCO2 = pd.Series(0, index = cls.fProd, name = 'fesCO2')
totalCO2 = pd.Series(0, index = cls.fProd, name = 'totalCO2')
for t in cls.fProd:
bioCO2[t] = calcEnergy.loc[t,'bioEnergy'] * cls.bfCO2.loc['bioCO2 b1',t]
fesCO2[t] = calcEnergy.loc[t,'fesEnergy'] * cls.bfCO2.loc['fesCO2 b1',t]
totalCO2[t] = bioCO2[t] + fesCO2[t]
return pd.concat([bioCO2, fesCO2, totalCO2], axis=1)
def calculateFuel(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# fuelTable (df) - fuel impact by product; indexed by fuel type
fuels = cls.fuelTable.index
bioFI = pd.Series(0, index = cls.fProd, name = 'bioFuelImp')
fesFI = pd.Series(0, index = cls.fProd, name = 'fesFuelImp')
fuelImp = pd.Series(0, index = cls.fProd, name = 'fuelImp')
for t in cls.fProd:
bioFI[t] = calcEnergy.loc[t,'bioEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1])
fesFI[t] = calcEnergy.loc[t,'fesEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2])
fuelImp[t] = bioFI[t] + fesFI[t]
fuelTransVol = cls.fuelTable.copy()
fuel1 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1]
fuel2 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2]
for t in cls.fProd:
fuelTransVol.loc[fuel1,t] = [calcEnergy.loc[t,'bioEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel1]
fuelTransVol.loc[fuel2,t] = [calcEnergy.loc[t,'fesEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel2]
fuelTrans = pd.Series(cls.calculateTrans(fuelTransVol), name = 'fuelTrans')
return pd.concat([bioFI, fesFI, fuelImp, fuelTrans], axis=1)
def calculateResidual(cls,pbpVol,f2pVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# rsdlModes [df] - residual treatments modes; indexed by residual type
# rsdlbio [df] - transport and biogenic emissions factors; indexed by residual treatment mode
# rsdlfos [df] - transport and fossil emissions factors; indexed by residual treatment mode
pulpProd = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'pulpProduced')
fiberRes = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'fiberResidue')
for p in cls.rPulp: # order of fPulp must match order of r/vPulp
pulpProd[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
fiberRes[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(1 - cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
pulpProd[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
fiberRes[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(1 - cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpUP = pbpVol.iloc[:,:-1].div(pulpProd, axis=0).fillna(0) # pulpUsePct
rFiberRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(fiberRes[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rFiberRsd')
rPulpRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rPulpRsd')
rTotalRsd = rFiberRsd + rPulpRsd
vFiberRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(fiberRes[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vFiberRsd')
vPulpRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vPulpRsd')
vTotalRsd = vFiberRsd + vPulpRsd
rsdlType = cls.rsdlModes.index
rsdlQuantity = pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rt in rsdlType:
if cls.rsdlModes.loc[rt,'Input Base'] == 1:
rsdlQuantity.loc[rt,:] = rTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
if cls.rsdlModes.loc[rt,'Input Base'] == 2:
rsdlQuantity.loc[rt,:] = vTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
rsdlMode = cls.rsdlModes.columns[:-2]
rsdlModeVol = {rM: pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rM in rsdlMode}
for rM in rsdlMode:
rsdlModeVol[rM] = rsdlQuantity.mul(cls.rsdlModes[rM], axis=0)
rsdlModeVol[rM] = rsdlModeVol[rM].assign(TransCode=cls.rsdlbio.loc[rM,'TransCode'] * np.ones(len(rsdlType)))
rsdlModeVol[rM].replace([np.inf, -np.inf], np.nan, inplace=True) # TODO: what happens to make this inf?
            rsdlModeVol[rM] = rsdlModeVol[rM].fillna(0)
bioImp = pd.Series(0, index = cls.fProd, name = 'bioImp')
fosImp = pd.Series(0, index = cls.fProd, name = 'fossilImp')
for t in cls.fProd:
bioImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlbio.loc[rM,t] for rM in rsdlMode])
fosImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlfos.loc[rM,t] for rM in rsdlMode])
biofosImp = pd.Series(bioImp + fosImp, name = 'bio+fos')
rsdlTrans = pd.Series(0, index = cls.fProd, name = 'rsdlTrans')
for rM in rsdlMode:
rsdlTrans += cls.calculateTrans(rsdlModeVol[rM])
return pd.concat([bioImp, fosImp, biofosImp, rsdlTrans], axis=1)
def getExportTrans(cls,transVol):
transImpact = pd.Series(0, index = transVol.columns[:-1])
tC = transVol['TransCode']
        tC = tC[(tC != 0) & (tC != 1)] # keep only rows whose transport code is not 0 or 1
transVol = transVol.loc[tC.index]
for n in transVol.columns[:-1]:
for m in cls.transUMI.columns:
transImpact[n] += sum(transVol[n] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values)
return transImpact.values
def calculateExport(cls,exportOld,exportNew):
# exportOld [df] old export from US; indexed by rec fiber
# exportNew [df] new export from US; indexed by rec fiber
impChange = pd.Series(0, index = cls.fYield.index, name = 'impChangeByGroup')
sumChange = pd.Series(0, index = cls.fYield.index, name = 'sumNetChange')
for r in impChange.index:
typeMask = cls.fiberType[cls.fiberType['fiberType'] == r].index
# impChange[r] = (exportOld.loc[typeMask, 'exportOld'] - exportNew.loc[typeMask, 'exportNew']).sum()
impChange[r] = (exportNew.loc[typeMask, 'exportNew'] - exportOld.loc[typeMask, 'exportOld']).sum()
sumChange[r] = impChange[r] * (1 - cls.fYield.loc[r,'US'] / cls.fYield.loc[r,'China'])
beta = sumChange.sum() / (cls.chinaCons.loc['totalVir'].values + cls.chinaCons.loc['domesticRec'].values +
cls.chinaCons.loc['importRec-US'].values + cls.chinaCons.loc['importRec-nonUS'].values)
# chinaTrans = cls.getExportTrans(exportOld) - cls.getExportTrans(exportNew)
chinaTrans = cls.getExportTrans(exportNew) - cls.getExportTrans(exportOld)
return cls.chinaVals.loc['Production'] * cls.chinaVals.loc['Energy Intensity'] * cls.chinaVals.loc['Emission Factor'] * beta + chinaTrans
def getForestVirginGHG(cls,virCons,woodint,slope,intercept):
# virCons [df] change in virgin consumption; products as columns
# woodint [df] intervals of virgin wood consumption
# slope [s] b1 value for GHG emissions
# intercept[s] b0 value for GHG emissions
for n in range(1,len(woodint.columns)):
if (woodint[n].values <= virCons) & (virCons < woodint[n+1].values):
return virCons * slope[n] + intercept[n]
return 0 # catch values outside of interval
def calculateForest(cls,virCons,forYear):
# virCons [float] change in virgin consumption, sum of all products
# forYear [int] forest year length for cumulative emissions calcs; 10-90 by ten
deltaTotalGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wtotalGHGb1[forYear], cls.wtotalGHGb0[forYear]),
name = 'totalGHG') * 1e6
deltabioGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wbioGHGb1[forYear], cls.wbioGHGb0[forYear]),
name = 'bioGHG') * 1e6
deltafosGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wfosGHGb1[forYear], cls.wfosGHGb0[forYear]),
name = 'fosGHG') * 1e6
return pd.concat([deltaTotalGHG, deltabioGHG, deltafosGHG], axis=1)
def calculateEmissions(cls):
# xls [df] - name of Excel spreadsheet to pull data from
# fProd [df] - list of products in current scenario
# rL [dict] - recycled content level by product
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# f2pVolNew [df] - fiber to pulp volume (in Mg); indexed by fiber code
# pbpVolNew [df] - pulp by product volume; indexed by pulp name
# consCollNew [df] - domestic consumption, collection, and recovery by product
pulpNames = cls.rPulp + cls.vPulp
mvO = [cls.pbpVolOld.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolOld = pd.concat([mvO[0],mvO[1]], axis=1).T
mvN = [cls.pbpVolNew.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolNew = pd.concat([mvN[0],mvN[1]], axis=1).T
# Chemical
chemImp = cls.calculateChem(cls.chemicals, cls.prodDemand)
# EoL
oldEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollOld)
newEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollNew)
# Energy
oldPulpPct = cls.getEnergyPulpPct(cls.pbpVolOld)
newPulpPct = cls.getEnergyPulpPct(cls.pbpVolNew)
oldPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolOld, cls.pbpVolOld)
newPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolNew, cls.pbpVolNew)
oldYldMultiplier = (oldPYCoeff / oldPYCoeff).fillna(0)
newYldMultiplier = (newPYCoeff / oldPYCoeff).fillna(0)
oldMP = cls.getEnergyMultiProd(oldYldMultiplier, oldPulpPct)
newMP = cls.getEnergyMultiProd(newYldMultiplier, newPulpPct)
oldEnergy = cls.calculateEnergy(cls.pbpVolOld, cls.prodLD, oldMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
newEnergy = cls.calculateEnergy(cls.pbpVolNew, cls.demandNew, newMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
# Production
oldProd = cls.calculateProduction(oldEnergy)
newProd = cls.calculateProduction(newEnergy)
# Fuel
oldFuel = cls.calculateFuel(oldEnergy)
newFuel = cls.calculateFuel(newEnergy)
# Residual
oldRsdl = cls.calculateResidual(cls.pbpVolOld, cls.f2pVolOld)
newRsdl = cls.calculateResidual(cls.pbpVolNew, cls.f2pVolNew)
# Transportation
oldFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolOld), name = 'fiberTrans')
oldMarketTrans = pd.Series(cls.calculateTrans(marketVolOld), name = 'marketTrans')
oldTrans = pd.concat([oldFiberTrans, oldMarketTrans, chemImp['chemTrans'], oldFuel['fuelTrans'],
oldRsdl['rsdlTrans'], oldEoL['eolTrans']], axis=1)
newFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolNew), name = 'fiberTrans')
newMarketTrans = pd.Series(cls.calculateTrans(marketVolNew), name = 'marketTrans')
newTrans = pd.concat([newFiberTrans, newMarketTrans, chemImp['chemTrans'], newFuel['fuelTrans'],
newRsdl['rsdlTrans'], newEoL['eolTrans']], axis=1)
# Export
exportImp = cls.calculateExport(cls.exportOld,cls.exportNew)
# FASOM/LURA
forestGHG = cls.calculateForest(cls.f2pVolNew.iloc[:,:-1].loc[cls.vFiber].sum().sum() -
cls.f2pVolOld.iloc[:,:-1].loc[cls.vFiber].sum().sum(), 90)
# Summary calcs for plotting
oldSums = pd.concat([pd.Series(chemImp['chemImp'], name='chemImp'),
pd.Series(oldFuel['bioFuelImp'], name='fuelbio'),
pd.Series(oldFuel['fesFuelImp'], name='fuelfos'),
pd.Series(oldProd['totalCO2'], name='prodImp'),
pd.Series(oldProd['bioCO2'], name='prodbio'),
pd.Series(oldProd['fesCO2'], name='prodfos'),
pd.Series(oldEnergy['totalEnergy'], name='energy'),
pd.Series(oldEnergy['bioEnergy'], name='energybio'),
pd.Series(oldEnergy['fesEnergy'], name='energyfos'),
pd.Series(oldRsdl['bio+fos'], name='residImp'),
pd.Series(oldRsdl['bioImp'], name='residbio'),
pd.Series(oldRsdl['fossilImp'], name='residfos'),
pd.Series(oldEoL['bftEoL'], name='eolImp'),
pd.Series(oldEoL['bioEoL'], name='eolbio'),
pd.Series(oldEoL['fesTransEoL'], name='eolfos'),
pd.Series(oldProd['bioCO2'] + oldRsdl['bioImp'] + oldEoL['bioEoL'], name='bioCO2'),
pd.Series(oldTrans.sum(axis=1) + chemImp['chemImp'] + oldFuel['fuelImp'] +
oldProd['fesCO2'] + oldRsdl['fossilImp'] + oldEoL['fesTransEoL'], name='fossilCO2'),
                             pd.Series(oldProd['bioCO2'] + oldRsdl['bioImp'], name='g2gbio'),
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 15:50:55 2020
@author: Emmett
"""
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
import LDA_Sampler
import string
import copy
import pandas as pd
import numpy as np
import keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import kerastuner as kt
import IPython
from keras import regularizers
from keras.models import Model
from numpy import linalg as LA
from nltk.corpus import stopwords
from scipy.special import gammaln
from keras.models import Sequential
from scipy.sparse import csr_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.layers import Dense, Activation, Embedding, LSTM
stoplist = stopwords.words('english')
make_singularRoot = nltk.stem.WordNetLemmatizer()
remove_ws = nltk.tokenize.WhitespaceTokenizer()
def preprocess(series):
    series = series.str.lower()
    series = series.str.replace('[{}]'.format(string.punctuation), ' ', regex=True)
    series = series.apply(lambda x: [make_singularRoot.lemmatize(w) for w in remove_ws.tokenize(x)])
    series = series.apply(lambda x: [item for item in x if item not in stoplist])
    return series.str.join(' ')
def get_x_lstm(max_vocab, vocab):
    tokenizer = Tokenizer(num_words=max_vocab, lower=True, split=' ')
tokenizer.fit_on_texts(vocab)
vocab_seq = tokenizer.texts_to_sequences(vocab)
return pad_sequences(vocab_seq)
def sampleFromDirichlet(samp):
return np.random.dirichlet(samp)
def sampleFromCategorical(samp):
samp = np.exp(samp)/np.exp(samp).sum()
return np.random.multinomial(1, samp).argmax()
def word_indices(wordOccuranceVec):
for i in wordOccuranceVec.nonzero()[0]:
for j in range(int(wordOccuranceVec[i])):
yield i
#maximum number of features
MAX_VOCAB_SIZE = 100
def processComments(samples, window_size = 5, MAX_VOCAB_SIZE = MAX_VOCAB_SIZE):
#Convert the collection of comments to a matrix of token counts
vectorizer = CountVectorizer(analyzer="word", tokenizer = None)
#Learn the vocabulary dictionary and return term-document matrix
train_data_features = vectorizer.fit_transform(samples)
#Array mapping from feature integer indices to feature name
words = vectorizer.get_feature_names()
vocab = dict(zip(words, np.arange(len(words))))
inv_vocab = dict(zip(np.arange(len(words)), words))
wordOccuranceMatrix = train_data_features.toarray()
return wordOccuranceMatrix, vocab, words
sort = True
import gensim
"""
from gensim.parsing.preprocessing import strip_non_alphanum
from gensim.parsing.preprocessing import strip_punctuation
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from gensim.parsing.preprocessing import stem_text
corpus = []
sampNum = 1
while (sampNum < 186):
fileOpen = open("sample"+str(sampNum)+".txt","r")
temp = fileOpen.readlines()
temp1 = strip_non_alphanum(str(temp))
temp2 = strip_punctuation(temp1)
final = strip_multiple_whitespaces(temp2)
#final = stem_text(temp3)
corpus.append(final)
sampNum += 1
"""
data = pd.read_csv('keyword_comment_cleaned.csv')
user_ids = list(data.comment_parent_id.unique())
post_ids = list(data.post_id.unique())
num_user_ids = len(user_ids)
num_post_ids = len(post_ids)
comment_score = np.zeros((num_user_ids, num_post_ids))
for idx, i in enumerate(data[["comment_parent_id", "comment_score", "post_id"]].values):
comment_score[user_ids.index(i[0])][post_ids.index(i[2])] = i[1]
comment_score_normalized = comment_score/max(data.comment_score)
comments = [""] * num_post_ids
for i in data[["post_id", "comment_body"]].values:
comments[post_ids.index(i[0])] += i[1]
comments = pd.DataFrame(comments)
comments = preprocess(comments[0])
comments.shape
#corpusList = [i for item in corpus for i in item.split()]
matrix, vocabulary, words = processComments(comments)
num_topics = 9
lambda_param = 0.8
#Probabilistic Matrix Factorization (PMF)
#From paper: "effective recommendation model that uses matrix factorization (MF)
#technique to find the latent features of users and items from a probabilistic perspective"
#create user latent vector
user_weights = np.random.rand(num_topics, num_user_ids)
#create item (in this case, posts) latent vector
post_weights = np.random.rand(num_topics, num_post_ids)
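# Sketch only (defined here but never called): one way a single regularized update
# of the PMF objective described above could look for these latent matrices.
# `ratings` is assumed to be the (num_user_ids x num_post_ids) score matrix,
# e.g. comment_score_normalized; the function name, learning rate and
# regularization strength are illustrative, not part of the original model.
def pmf_gradient_step(user_w, post_w, ratings, lr=0.005, reg=0.02):
    err = ratings - user_w.T @ post_w    # reconstruction error, shape (users x posts)
    # step along the negative gradient of the squared error with an L2 penalty (reg)
    new_user_w = user_w + lr * (post_w @ err.T - reg * user_w)
    new_post_w = post_w + lr * (user_w @ err - reg * post_w)
    return new_user_w, new_post_w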
beta = 0.01
alpha = 10*np.ones(num_topics)/num_topics
##############################################################################
#standardize text -- makes all characters lowercase and removes common words
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in comments]
#count number of times that word appears in corpus
#pair frequency with respective word in new array
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
corpus_removeOne = [[token for token in text if frequency[token]>1] for text in texts]
from gensim import corpora
#add corpora to dictionary
dictionary = corpora.Dictionary(corpus_removeOne)
#save dictionary for future reference
dictionary.save('C:\\Users\\emmet\\.spyder-py3-dev\\redditTest.dict') #location of document in computer
#dict = gensim.corpora.Dictionary.load('redditTest.dict')
#assign numeric id to each token in dictionary
dictID = dictionary.token2id
#converts each word into vector following same process as example
bow_corpus = [dictionary.doc2bow(text) for text in corpus_removeOne]
corpora.MmCorpus.serialize('redditTest.mm', bow_corpus)
corp = gensim.corpora.MmCorpus('redditTest.mm')
print(bow_corpus)
from gensim import models
#from gensim.models import TfidfModel
tfidf = models.TfidfModel(bow_corpus)
corpus_tfidf = tfidf[bow_corpus]
for doc in corpus_tfidf:
    if doc:
        print(doc)
lda_model = models.LdaModel(bow_corpus, id2word=dictionary, num_topics=9)
corpus_LDA = lda_model[bow_corpus]
print(corpus_LDA)
##############################################################################
lda = LDA_Sampler.LdaSampler(n_topics=num_topics, matrix_shape=matrix.shape, lambda_param=lambda_param)
"Long Short Term Memory"
lstm_out = 128
batch_size = 8
p_embedding_lstm = 200
X = get_x_lstm(MAX_VOCAB_SIZE, comments.values)
#Sequential: linear stack of layers
#Embedding: turns positive integers (indexes) into dense vectors of fixed size
#LSTM: long short term memory
#Dropout: regularization method where input and recurrent connections to LSTM
#units are probabilistically excluded from activation and weight updates while
#training a network
#Dense: densely-connected neural network layer
#Activation: element-wise activation function passed as the 'activation' argument
#Kernel: weights matrix created by the layer
#Compile: compile source into code object that can be executed by exec() or eval()
"""
model = Sequential()
model.add(Embedding(MAX_VOCAB_SIZE, p_embedding_lstm, input_length = X.shape[1]))
model.add(LSTM(lstm_out, dropout = 0.2))
model.add(Dense(num_topics, activation = 'tanh', name = "doc_latent_vector", kernel_regularizer = regularizers.l2()))
model.compile(loss = 'mean_squared_error', optimizer = 'rmsprop', metrics = ['accuracy'])
model.summary()
"""
'Create a data set on which the model will be validated'
data_predict = pd.read_csv('test_input.csv')
post_ids_predict = list(data_predict.post_id.unique())
num_post_ids_predict = len(post_ids_predict)
comments_predict = [""] * num_post_ids_predict
for i in data_predict[["post_id", "comment_body"]].values:
comments_predict[post_ids_predict.index(i[0])] += i[1]
comments_predict = pd.DataFrame(comments_predict)
import matplotlib
#matplotlib.use("qt4agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.patches import Ellipse
import seaborn as sns
from matplotlib.path import Path
import os
#plt.ion()
#plt.show(block=False)
'''
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
'''
class dynamic_hist():
def __init__(self,ax,resol=1,c='g',label='',point_flag = False,save=False,plot_n=-1):
self.point_flag = point_flag
self.ax = ax
self.save = save
self.x,self.y = np.array([]),np.array([])
self.c = c
self.resol = resol
self.bin_width = resol - (resol*0.1)
self.offset = resol/2
self.bins=np.arange(0,100+resol,resol)
self.plot_n = plot_n
        hist, bins = np.histogram([], bins=self.bins, density=True)
self.sp = []
self.sliding_window = 10
#self.sp = self.ax.hist(hist,bins = bins, color= self.c,alpha=0.7,label=label)
#self.sp = self.ax.bar(bins,hist)
# Change width depending on your bins
# Change width depending on your bins
self.label = label
def update_plot_elem(self,x,color=[],arg={}):
x *=100
nplots = arg['n_plots']
width = self.bin_width/nplots
bin_init_offset = self.bin_width/2
if self.save == True:
self.x = np.append(self.x,x)
else:
self.x = x
#min_v = min(self.x)
#max_v = max(self.x)
#self.bins = np.arange(min_v,max_v)
        hist, bins = np.histogram(self.x, self.bins, density=True)
max_val = hist.sum()
norm_hist = hist/max_val
if len(self.sp)==0:
bar_bins = bins[:-1] + self.offset - bin_init_offset + (self.plot_n*width)
self.sp = self.ax.bar(
bar_bins,
hist,
width=width,
label=self.label)
else:
for i in range(len(self.sp)):
self.sp[i].set_height(hist[i])
#hist, bins = np.histogram(self.x, self.bins,normed=True,density = True)
#self.sp[0] = hist
#self.sp[1] = bins
# self.ax.hist(hist,bins = bins, color= self.c,alpha=0.7,label=self.label)
class dynamic_scatter():
def __init__(self,ax,c='g',label='',point_flag = False,save=False,**arg):
self.point_flag = point_flag
self.ax = ax
self.save = save
self.x,self.y = np.array([]),np.array([])
self.marker = 'o'
self.scale = 20
if 'marker' in arg:
            self.marker = arg['marker']
if 'scale' in arg:
self.scale = arg['scale']
self.sp = self.ax.scatter([],[],s =self.scale ,c = c,marker = self.marker, label= label)
def update_plot_elem(self,x,y,color=[],arg = {}):
if self.save == True:
self.x = np.append(self.x,x)
self.y = np.append(self.y,y)
#self.y.append(y)
else:
self.x = x
self.y = y
data = np.c_[self.x,self.y]
self.sp.set_offsets(data)
class dynamic_plot_elm():
def __init__(self,ax,c='g',label='',point_flag = False ,save=False , **kwarg):
self.point_flag = point_flag
self.window = kwarg['window']
self.ax = ax
self.color = c
linestyle = '-'
scale = 2
if 'scale' in kwarg:
scale = kwarg['scale']
if 'linestyle' in kwarg:
linestyle = kwarg['linestyle']
self.fill = self.ax.fill_between([],[],color=c)
self.p, = self.ax.plot([],[],color = c,label=label,linewidth=scale,linestyle = linestyle)
self.save = save
self.x,self.y = np.array([]),np.array([])
def fill_area(self,std):
self.fill.remove()
low_y = self.y-std
up_y = self.y+std
self.fill = self.ax.fill_between(self.x,up_y,low_y,edgecolor='#1B2ACC', facecolor='#1B2ACC',alpha=0.3)
def update_plot_elem(self,x,y,color=[],arg={}):
if self.save == True:
self.x = np.append(self.x,x)
self.y = np.append(self.y,y)
else:
self.x = x
self.y = y
df = pd.DataFrame({'x':self.x ,'y':self.y})
if len(self.x)>self.window and self.window >0 :
# calculate a 60 day rolling mean and plot
mean_pts = df.rolling(window=self.window).mean()
mpx = mean_pts['x'][pd.isna(mean_pts['x']) == False].to_numpy()
            mpy = mean_pts['y'][pd.isna(mean_pts['y']) == False].to_numpy()
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import numpy as np
import pytest
from pandas.testing import assert_frame_equal
from gators.imputers.numerics_imputer import NumericsImputer
from gators.imputers.int_imputer import IntImputer
from gators.imputers.float_imputer import FloatImputer
from gators.imputers.object_imputer import ObjectImputer
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture()
def data():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num():
X_int = pd.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing():
X_int = pd.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.copy(),
'float': X_float.copy(),
'object': X_object.copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
@pytest.fixture()
def data_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
X_int_ks = ks.from_pandas(X_int)
X_float_ks = ks.from_pandas(X_float)
X_object_ks = ks.from_pandas(X_object)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_dict = {
'int': X_int_ks,
'float': X_float_ks,
'object': X_object_ks,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num_ks():
X_int = ks.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing_ks():
X_int = ks.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = ks.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.to_pandas().copy(),
'float': X_float.to_pandas().copy(),
'object': X_object.to_pandas().copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = ks.from_pandas(pd.concat([X_int, X_float, X_object], axis=1))
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
def test_int_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_float_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_object_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_int_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']).to_pandas(),
X_expected_dict['int'],)
@pytest.mark.koalas
def test_float_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['float'].transform(X_dict['float']).to_pandas(),
X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['object'].transform(X_dict['object']).to_pandas(),
X_expected_dict['object'],
)
def test_int_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_float_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_object_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_int_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_float_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
def test_num_int_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_num_float_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_num_int_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_num_float_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
def test_num_int_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_num_float_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_num_int_ks_np(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_num_float_ks_np(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_no_missing_int_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_no_missing_float_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_no_missing_object_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_no_missing_int_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_no_missing_float_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_no_missing_object_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['object'].transform(
X_dict['object'].to_pandas()), X_expected_dict['object'],
)
def test_no_missing_int_pd_np(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_no_missing_float_pd_np(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_no_missing_object_pd_np(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_no_missing_int_ks_np(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_no_missing_float_ks_np(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_no_missing_object_ks_np(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
def test_full_pd(data_full):
objs_dict, X, X_expected = data_full
X_new = objs_dict['object'].transform(X)
X_new = objs_dict['int'].transform(X_new)
X_new = objs_dict['float'].transform(X_new)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_full_ks(data_full_ks):
objs_dict, X, X_expected = data_full_ks
X_new = objs_dict['object'].transform(X)
X_new = objs_dict['int'].transform(X_new)
X_new = objs_dict['float'].transform(X_new)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_full_pd_np(data_full):
objs_dict, X, X_expected = data_full
X_new = objs_dict['object'].transform_numpy(X.to_numpy())
X_new = objs_dict['int'].transform_numpy(X_new)
X_new = objs_dict['float'].transform_numpy(X_new)
X_new = pd.DataFrame(X_new, columns=['A', 'B', 'C', 'D', 'E', 'F'])
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_full_ks_np(data_full_ks):
objs_dict, X, X_expected = data_full_ks
X_new = objs_dict['object'].transform_numpy(X.to_numpy())
X_new = objs_dict['int'].transform_numpy(X_new)
X_new = objs_dict['float'].transform_numpy(X_new)
X_new = pd.DataFrame(X_new, columns=['A', 'B', 'C', 'D', 'E', 'F'])
assert_frame_equal(X_new, X_expected.astype(object))
def test_imputers_columns_pd():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform(X)
X_new = obj_int_B.transform(X_new)
X_new = obj_float_C.transform(X_new)
X_new = obj_float_D.transform(X_new)
X_new = obj_object_E.transform(X_new)
X_new = obj_object_F.transform(X_new)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_imputers_columns_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform(X)
X_new = obj_int_B.transform(X_new)
X_new = obj_float_C.transform(X_new)
X_new = obj_float_D.transform(X_new)
X_new = obj_object_E.transform(X_new)
X_new = obj_object_F.transform(X_new)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_imputers_columns_pd_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform_numpy(X.to_numpy())
X_new = obj_int_B.transform_numpy(X_new)
X_new = obj_float_C.transform_numpy(X_new)
X_new = obj_float_D.transform_numpy(X_new)
X_new = obj_object_E.transform_numpy(X_new)
X_new = obj_object_F.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
@pytest.mark.koalas
def test_imputers_columns_ks_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform_numpy(X.to_numpy())
X_new = obj_int_B.transform_numpy(X_new)
X_new = obj_float_C.transform_numpy(X_new)
X_new = obj_float_D.transform_numpy(X_new)
X_new = obj_object_E.transform_numpy(X_new)
X_new = obj_object_F.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
def test_imputers_num_pd():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform(X)
X_new = obj_object.transform(X_new)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_imputers_num_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform(X)
X_new = obj_object.transform(X_new)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_imputers_num_pd_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform_numpy(X.to_numpy())
X_new = obj_object.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
@pytest.mark.koalas
def test_imputers_num_ks_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform_numpy(X.to_numpy())
X_new = obj_object.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
def test_num_np():
    X = pd.DataFrame({'A': [0, 1, np.nan]})
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
import pandas as pd
from pyqmc.mc import vmc, initial_guess
from pyscf import gto, scf
from pyqmc.reblock import reblock
from pyqmc.slater import Slater
from pyqmc.accumulators import EnergyAccumulator
import pytest
@pytest.mark.slow
def test_vmc(C2_ccecp_rhf):
"""
Test that a VMC calculation of a Slater determinant matches Hartree-Fock within error bars.
"""
mol, mf = C2_ccecp_rhf
nconf = 500
nsteps = 300
warmup = 30
wf = Slater(mol, mf)
coords = initial_guess(mol, nconf)
df, coords = vmc(
wf,
coords,
nblocks=int(nsteps / 30),
nsteps_per_block=30,
accumulators={"energy": EnergyAccumulator(mol)},
)
    df = pd.DataFrame(df)
import pandas as pd
import numpy as np
from web_constants import *
from signatures import Signatures, get_signatures_by_mut_type
from project_data import ProjectData, get_selected_project_data
def compute_counts(chosen_sigs, projects, mut_type, single_sample_id=None, normalize=False):
signatures = get_signatures_by_mut_type({mut_type: chosen_sigs})[mut_type]
project_data = get_selected_project_data(projects)
counts_df = pd.DataFrame(index=[], data=[], columns=signatures.get_contexts())
for proj in project_data:
proj_id = proj.get_proj_id()
samples = proj.get_samples_list()
if single_sample_id != None:
if single_sample_id in samples:
samples = [single_sample_id]
else:
continue
        proj_counts_df = pd.DataFrame(index=samples, columns=[])
import itertools
import numba as nb
import numpy as np
import pandas as pd
import pytest
from sid.contacts import _consolidate_reason_of_infection
from sid.contacts import _numpy_replace
from sid.contacts import calculate_infections_by_contacts
from sid.contacts import create_group_indexer
@pytest.mark.unit
@pytest.mark.parametrize(
"states, group_code_name, expected",
[
(
pd.DataFrame({"a": [1] * 7 + [0] * 8}),
"a",
[list(range(7, 15)), list(range(7))],
),
(
pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, 3]).astype("category")}),
"a",
[[0, 4], [1, 5], [2, 6], [3, 7]],
),
(
pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, -1])}),
"a",
[[0, 4], [1, 5], [2, 6], [3]],
),
],
)
def test_create_group_indexer(states, group_code_name, expected):
result = create_group_indexer(states, group_code_name)
result = [r.tolist() for r in result]
assert result == expected
@pytest.fixture()
def households_w_one_infected():
states = pd.DataFrame(
{
"infectious": [True] + [False] * 7,
"cd_infectious_true": [-1] * 8,
"immunity": [1.0] + [0.0] * 7,
"group_codes_households": [0] * 4 + [1] * 4,
"households": [0] * 4 + [1] * 4,
"group_codes_non_rec": [0] * 4 + [1] * 4,
"n_has_infected": 0,
"virus_strain": pd.Series(["base_strain"] + [pd.NA] * 7, dtype="category"),
}
)
params = pd.DataFrame(
columns=["value"],
data=1,
index=pd.MultiIndex.from_tuples(
[("infection_prob", "households", "households")]
),
)
indexers = {"recurrent": nb.typed.List()}
indexers["recurrent"].append(create_group_indexer(states, ["households"]))
assortative_matching_cum_probs = nb.typed.List()
assortative_matching_cum_probs.append(np.zeros((0, 0)))
group_codes_info = {"households": {"name": "group_codes_households"}}
virus_strains = {
"names": ["base_strain"],
"contagiousness_factor": np.ones(1),
"immunity_resistance_factor": np.zeros(1),
}
return {
"states": states,
"recurrent_contacts": np.ones((len(states), 1), dtype=bool),
"random_contacts": None,
"params": params,
"indexers": indexers,
"assortative_matching_cum_probs": assortative_matching_cum_probs,
"group_codes_info": group_codes_info,
"susceptibility_factor": np.ones(len(states)),
"virus_strains": virus_strains,
"seasonality_factor": pd.Series([1], index=["households"]),
}
@pytest.mark.integration
def test_calculate_infections_only_recurrent_all_participate(
households_w_one_infected,
):
(
calc_infected,
calc_n_has_additionally_infected,
calc_missed_contacts,
was_infected_by,
) = calculate_infections_by_contacts(
**households_w_one_infected,
contact_models={"households": {"is_recurrent": True}},
seed=itertools.count(),
)
states = households_w_one_infected["states"]
    exp_infected = pd.Series([-1] + [0] * 3 + [-1] * 4, dtype="int8")
import os
import pandas as pd
def print_best(result_dir):
    res = pd.DataFrame()
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
def test_where_unsafe_int(any_signed_int_numpy_dtype):
s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(
list(range(2, 7)) + list(range(5, 10)),
dtype=any_signed_int_numpy_dtype,
)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_numpy_dtype):
s = Series(np.arange(10), dtype=float_numpy_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_numpy_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
    tm.assert_series_equal(s, expected)
# third party import
import pytest
from os import path
import pandas as pd
# module import
from dependencynet.model import ModelBuilder
from dependencynet.core.model.tree_model import TreeModelBuilder
@pytest.fixture
def source_data_towns(schema_towns, compact_columns_towns):
filename = path.join('tests', 'resources', 'data', 'compact', 'towns.csv')
data = pd.read_csv(filename, delimiter=';')
    df = pd.DataFrame(data, columns=compact_columns_towns)
from abc import ABC
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.statespace.sarimax import SARIMAX
# Construct the model
class StateSpaceModel(sm.tsa.statespace.MLEModel, ABC):
def __init__(self, endog, exog, factors_x, factors_y):
# Initialize the state space model
endog_aug = pd.concat([exog, endog.to_frame()], axis=1)
k_states = k_posdef = factors_x + factors_y
super(StateSpaceModel, self).__init__(endog_aug, k_states=k_states, k_posdef=k_posdef,
initialization='approximate_diffuse')
self._covariates = endog_aug.columns
self._factors_x = factors_x
self._factors_y = factors_y
self._param_cov_state_f_idx = self._params_cov_state_z_idx = self._params_cov_obs_idx = None
        self._params_phi_1_idx = self._params_phi_23_idx = None
self._params_a_1_idx = self._params_a_2_idx = None
# Setup the fixed components of the state space representation
transition_f = np.hstack((np.eye(factors_x), np.zeros((factors_x, factors_y))))
transition_z = np.hstack((np.zeros((factors_y, factors_x)), np.eye(factors_y)))
transition = np.vstack((transition_f, transition_z))
dims_x = endog_aug.shape[1] - 1
dims_y = 1
self._dims_x = dims_x
self._dims_y = dims_y
# Assume [x, y]'
design_x = np.hstack((np.ones((dims_x, factors_x)), np.zeros((dims_x, factors_y))))
design_y = np.hstack((np.zeros((dims_y, factors_x)), np.ones((dims_y, factors_y))))
design = np.vstack((design_x, design_y))
self.ssm['design'] = design.reshape((dims_x + 1, k_states, 1))
self.ssm['transition'] = transition.reshape((k_states, k_states, 1))
self.ssm['selection'] = np.eye(k_states)
self.ssm['obs_intercept'] = np.zeros(dims_x + dims_y).reshape(-1, 1)
# Cache some indices
self._state_cov_idx = np.diag_indices(k_posdef)
self._obs_cov_idx = np.diag_indices(dims_x + dims_y)
# grid_transition_f = (np.repeat(np.arange(factors_x), factors_x),
# np.tile(np.arange(factors_x), factors_x))
grid_transition_f = np.diag_indices(factors_x)
grid_transition_z = (np.repeat(np.arange(factors_x, k_states), k_states),
np.tile(np.arange(k_states), factors_y))
self._transition_f_idx = grid_transition_f
self._transition_z_idx = grid_transition_z
grid_design_x = (np.repeat(np.arange(dims_x), factors_x),
np.tile(np.arange(factors_x), dims_x))
grid_design_y = (np.repeat(np.arange(dims_x, dims_x + dims_y), factors_y),
np.tile(np.arange(factors_x, k_states), dims_y))
self._design_x_idx = grid_design_x
self._design_y_idx = grid_design_y
self.init_param_indx()
@staticmethod
def get_position(idx, i, row_offset=0, col_offset=0):
return idx[0][i]-row_offset, idx[1][i]-col_offset
def init_param_indx(self):
c = 0
params_cov_obs = ['sigma2.%s' % i for i in self._covariates]
self._params_cov_obs_idx = (c, c + len(params_cov_obs))
c += len(params_cov_obs)
params_cov_state_f = ['sigma2.f.%i' % i for i in range(self._factors_x)]
self._param_cov_state_f_idx = (c, c + len(params_cov_state_f))
c += len(params_cov_state_f)
params_cov_state_z = ['sigma2.z.%i' % i for i in range(self._factors_y)]
self._params_cov_state_z_idx = (c, c + len(params_cov_state_z))
c += len(params_cov_state_z)
params_cov = params_cov_state_f + params_cov_state_z + params_cov_obs
params_phi_1 = ['phi.1.%i%i' % self.get_position(self._transition_f_idx, i) for i in range(len(self._transition_f_idx[0]))]
self._params_phi_1_idx = (c, c+len(params_phi_1))
c += len(params_phi_1)
params_phi_23 = ['phi.23.%i%i' % self.get_position(self._transition_z_idx, i,
row_offset=self._factors_x) for i in range(len(self._transition_z_idx[0]))]
self._params_phi_23_idx = (c, c + len(params_phi_23))
c += len(params_phi_23)
params_phi = params_phi_1 + params_phi_23
params_a_1 = ['a.1.%i%i' % self.get_position(self._design_x_idx, i) for i in range(len(self._design_x_idx[0]))]
self._params_a_1_idx = (c, c+len(params_a_1))
c += len(params_a_1)
params_a_2 = ['a.2.%i%i' % self.get_position(self._design_y_idx, i,
row_offset=self._dims_x,
col_offset=self._factors_x) for i in range(len(self._design_y_idx[0]))]
self._params_a_2_idx = (c, c+len(params_a_2))
c += len(params_a_2)
params_a = params_a_1 + params_a_2
return params_cov + params_phi + params_a
@property
def param_names(self):
return self.init_param_indx()
# Describe how parameters enter the model
def update(self, params, *args, **kwargs):
params = super(StateSpaceModel, self).update(params, *args, **kwargs)
# Observation covariance
self.ssm[('obs_cov',) + self._obs_cov_idx] = params[self._params_cov_obs_idx[0]:self._params_cov_obs_idx[1]]
# State covariance
self.ssm[('state_cov',) + self._state_cov_idx] = params[self._param_cov_state_f_idx[0]:self._params_cov_state_z_idx[1]]
# Transition matrix
self.ssm[('transition',) + self._transition_f_idx] = params[self._params_phi_1_idx[0]:self._params_phi_1_idx[1]]
self.ssm[('transition',) + self._transition_z_idx] = params[self._params_phi_23_idx[0]:self._params_phi_23_idx[1]]
# Design matrix
self.ssm[('design',) + self._design_x_idx] = params[self._params_a_1_idx[0]:self._params_a_1_idx[1]]
self.ssm[('design',) + self._design_y_idx] = params[self._params_a_2_idx[0]:self._params_a_2_idx[1]]
# Specify start parameters and parameter names
@property
def start_params(self):
design, obs_cov, state_cov, transition = self.generate_start_matrices()
params_state_cov = state_cov[self._state_cov_idx]
params_obs_cov = obs_cov[self._obs_cov_idx]
params_phi = np.concatenate((transition[self._transition_f_idx],
transition[self._transition_z_idx]), axis=0)
params_a = np.concatenate((design[self._design_x_idx],
design[self._design_y_idx]), axis=0)
return np.concatenate((params_obs_cov, params_state_cov, params_phi, params_a))
def generate_start_matrices(self):
_exog = pd.DataFrame(self.endog[:, :-1], columns=self._covariates[:-1]).interpolate().fillna(0)
_endog = pd.Series(self.endog[:, -1], name=self._covariates[-1]).interpolate().fillna(0)
cov = _exog.cov()
w, v = np.linalg.eig(cov)
factors = pd.DataFrame(np.dot(_exog, v[:, :self._factors_x]), index=_exog.index)
_model = SARIMAX(endog=_endog, exog=factors, order=(self._factors_y, 0, 0))
res = _model.fit(disp=False, maxiter=100)
params_arx = res.params
phi1 = np.eye(self._factors_x)
factors_coeff = params_arx.values[:self._factors_x].reshape(1, -1)
ar_coeff = params_arx.values[self._factors_x:-1].reshape(1, -1)
phi2 = np.vstack([factors_coeff, np.zeros((self._factors_y - 1, self._factors_x))])
phi3 = np.vstack([ar_coeff, np.eye(self._factors_y)[:-1, :]])
transition = np.vstack([np.hstack([phi1, np.zeros((self._factors_x, self._factors_y))]),
np.hstack([phi2, phi3])])
a1 = v.T[:, :self._factors_x]
a2 = np.eye(self._dims_y, self._factors_y)
design_x = np.hstack([a1, np.zeros((self._dims_x, self._factors_y))])
design_y = np.hstack([np.zeros((self._dims_y, self._factors_x)), a2])
design = np.vstack([design_x, design_y])
state_cov = np.eye(self.k_states)
obs_cov = np.eye(len(self._covariates))
obs_cov[-1, -1] = params_arx.values[-1]
return design, obs_cov, state_cov, transition
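    # Added annotation (not part of the original model code): the start matrices
    # assembled above have the block structure
    #
    #   transition = [[phi1, 0   ],        design = [[a1, 0 ],
    #                 [phi2, phi3]]                  [0,  a2]]
    #
    # where phi1 (factors_x x factors_x) is initialised to the identity, phi2
    # carries the factor coefficients from the SARIMAX start-up fit, phi3 stacks
    # the AR coefficients on top of a shifted identity (companion form), a1 holds
    # the eigenvector loadings for the exogenous series, and a2 maps the
    # endogenous block onto its own factors.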
def transform_params(self, unconstrained):
constrained = unconstrained.copy()
for i1, i2 in [self._param_cov_state_f_idx, self._params_cov_state_z_idx, self._params_cov_obs_idx]:
constrained[i1:i2] = unconstrained[i1:i2] ** 2
return constrained
def untransform_params(self, constrained):
unconstrained = constrained.copy()
for i1, i2 in [self._param_cov_state_f_idx, self._params_cov_state_z_idx, self._params_cov_obs_idx]:
unconstrained[i1:i2] = constrained[i1:i2] ** 0.5
return unconstrained
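    # Added annotation: transform_params/untransform_params enforce non-negative
    # variance parameters by optimising an unconstrained value u and using
    # sigma2 = u**2 internally (e.g. u = -0.5 maps to a variance of 0.25, and
    # untransforming returns 0.5; the sign is not preserved, which is harmless
    # for a variance). Only the covariance slices are transformed; the phi and
    # design coefficients pass through unchanged.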
if __name__ == "__main__":
import os
from src.d01_data.dengue_data_api import DengueDataApi
from src.d04_modeling.abstract_sm import AbstractSM
os.chdir('../')
dda = DengueDataApi(interpolate=False)
x1, x2, y1, y2 = dda.split_data(random=False)
factors_x = 3
factors_y = 3
abstract_model = AbstractSM(x_train=x1, y_train=y1, bias=False)
city = 'sj'
endog, exog = dda.format_data(x1.loc[city].copy(), y1.loc[city].copy(), interpolate=False)
endog_mean = endog.mean(axis=0)
endog_std = endog.std(axis=0)
exog_mean = exog.mean(axis=0)
exog_std = exog.std(axis=0)
endog = (endog - endog_mean) / endog_std
exog = (exog - exog_mean) / exog_std
print("-------------- DFM Model --------------")
model_dfmq = sm.tsa.DynamicFactorMQ(exog,
factors=factors_x,
factor_orders=1,
idiosyncratic_ar1=False)
results_dfmq = model_dfmq.fit(method='em')
state_names = pd.Index(['f%i' % i for i in range(factors_x)])
transition_df = pd.DataFrame(model_dfmq.ssm['transition'], index=state_names, columns=state_names)
print(transition_df.round(4).to_string())
design_df = pd.DataFrame(model_dfmq.ssm['design'],
index=exog.columns,
columns=state_names)
print(design_df.round(4).to_string())
print("-------------- SSM Model --------------")
model = StateSpaceModel(endog=endog, exog=exog, factors_x=factors_x, factors_y=factors_y)
model.update(model.start_params)
state_names = pd.Index(['f%i' % i for i in range(factors_x)]).append(pd.Index(['z%i' % i for i in range(factors_y)]))
transition_df = pd.DataFrame(model.ssm['transition'], index=state_names, columns=state_names)
print(transition_df.round(4).to_string())
design_df = pd.DataFrame(model.ssm['design'],
index=exog.columns.append(pd.Index([endog.name])),
columns=state_names)
print(design_df.round(4).to_string())
results = model.fit(maxiter=200)
transition_df = | pd.DataFrame(model.ssm['transition'], index=state_names, columns=state_names) | pandas.DataFrame |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
DEFAULT_PAGINATION_SIZE = 5000 # for composite aggregations
PANDAS_VERSION: Tuple[int, ...] = tuple(
int(part) for part in pd.__version__.split(".") if part.isdigit()
)[:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
EMPTY_SERIES_DTYPE = pd.Series().dtype
def build_pd_series(
data: Dict[str, Any], dtype: Optional[np.dtype] = None, **kwargs: Any
) -> pd.Series:
"""Builds a pd.Series while squelching the warning
for unspecified dtype on empty series
"""
dtype = dtype or (EMPTY_SERIES_DTYPE if not data else dtype)
if dtype is not None:
kwargs["dtype"] = dtype
return pd.Series(data, **kwargs)
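# Example usage (illustrative only, not part of the original module):
#
#   build_pd_series({})                                      # empty -> EMPTY_SERIES_DTYPE, no warning
#   build_pd_series({"a": 1, "b": 2})                        # dtype inferred by pandas
#   build_pd_series({"a": 1.0}, dtype=np.dtype("float64"))   # explicit dtype passed through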
def docstring_parameter(*sub: Any) -> Callable[[Any], Any]:
def dec(obj: Any) -> Any:
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
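# Example usage of docstring_parameter (illustrative sketch): it substitutes
# positional placeholders in a docstring at definition time, e.g.
#
#   @docstring_parameter(DEFAULT_NUM_ROWS_DISPLAYED)
#   def show(df):
#       """Display at most {0} rows."""
#       ...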
class SortOrder(Enum):
ASC = 0
DESC = 1
@staticmethod
def reverse(order: "SortOrder") -> "SortOrder":
if order == SortOrder.ASC:
return SortOrder.DESC
return SortOrder.ASC
@staticmethod
def to_string(order: "SortOrder") -> str:
if order == SortOrder.ASC:
return "asc"
return "desc"
@staticmethod
def from_string(order: str) -> "SortOrder":
if order == "asc":
return SortOrder.ASC
return SortOrder.DESC
def elasticsearch_date_to_pandas_date(
value: Union[int, str], date_format: Optional[str]
) -> pd.Timestamp:
"""
    Given a specific Elasticsearch date format and a raw value, parses the value
    with the corresponding `pd.to_datetime` call and returns the resulting `pd.Timestamp`.
**Date Formats: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats
Parameters
----------
value: Union[int, str]
The date value.
date_format: str
The Elasticsearch date format (ex. 'epoch_millis', 'epoch_second', etc.)
Returns
-------
datetime: pd.Timestamp
From https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
Date formats can be customised, but if no format is specified then it uses the default:
"strict_date_optional_time||epoch_millis"
Therefore if no format is specified we assume either strict_date_optional_time
or epoch_millis.
"""
if date_format is None or isinstance(value, (int, float)):
try:
return pd.to_datetime(
value, unit="s" if date_format == "epoch_second" else "ms"
)
except ValueError:
return pd.to_datetime(value)
elif date_format == "epoch_millis":
return pd.to_datetime(value, unit="ms")
elif date_format == "epoch_second":
return pd.to_datetime(value, unit="s")
elif date_format == "strict_date_optional_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "basic_date":
return pd.to_datetime(value, format="%Y%m%d")
elif date_format == "basic_date_time":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S.%f", exact=False)
elif date_format == "basic_date_time_no_millis":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S%z")
elif date_format == "basic_ordinal_date":
return pd.to_datetime(value, format="%Y%j")
elif date_format == "basic_ordinal_date_time":
return pd.to_datetime(value, format="%Y%jT%H%M%S.%f%z", exact=False)
elif date_format == "basic_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y%jT%H%M%S%z")
elif date_format == "basic_time":
return pd.to_datetime(value, format="%H%M%S.%f%z", exact=False)
elif date_format == "basic_time_no_millis":
return pd.to_datetime(value, format="%H%M%S%z")
elif date_format == "basic_t_time":
return pd.to_datetime(value, format="T%H%M%S.%f%z", exact=False)
elif date_format == "basic_t_time_no_millis":
return pd.to_datetime(value, format="T%H%M%S%z")
elif date_format == "basic_week_date":
return pd.to_datetime(value, format="%GW%V%u")
elif date_format == "basic_week_date_time":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S.%f%z", exact=False)
elif date_format == "basic_week_date_time_no_millis":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S%z")
elif date_format == "strict_date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "strict_date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "strict_date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "strict_date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "strict_date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "strict_hour":
return pd.to_datetime(value, format="%H")
elif date_format == "hour":
return pd.to_datetime(value, format="%H")
elif date_format == "strict_hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "strict_hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "strict_hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "strict_ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "strict_time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "time_no_millis":
return | pd.to_datetime(value, format="%H:%M:%S%z") | pandas.to_datetime |
#!/usr/bin/env python3
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 6 21:11:52 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import compmem as cm
# 2018-09-01 06:22:21.180029: Step 341, loss=3.60 (33.8 examples/sec; 0.740 sec/batch)
ptn_iter = re.compile(r"""(?P<timestamp>.+): \s [sS]tep \s (?P<Step>\d+),\s
(loss|perplexity) .* \(
(?P<Speed>[\d.]+) \s examples/sec; \s
(?P<Duration>[\d.]+) \s sec/batch\)?""", re.VERBOSE)
def parse_iterations(path):
path = Path(path)
iterations = []
with path.open() as f:
for line in f:
line = line.rstrip('\n')
m = ptn_iter.match(line)
if m:
iterations.append(m.groupdict())
df = pd.DataFrame(iterations)
if len(df) == 0:
print(f'File {path} is empty??')
assert len(df) > 0
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['Speed'] = pd.to_numeric(df['Speed'])
df['Step'] = pd.to_numeric(df.Step)
df['Duration'] = pd.to_numeric(df.Duration)
return df
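# Illustrative note: the sample log line quoted above the regex parses into a
# single row with timestamp=2018-09-01 06:22:21.180029, Step=341, Speed=33.8 and
# Duration=0.740, with the columns coerced via pd.to_datetime / pd.to_numeric.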
def load_latency(path):
path = Path(path)
case2ex = {
'case1': 'tf',
'case2': 'salus',
'case3': 'mps'
}
data = {}
# case1, 2, 3
for case in path.iterdir():
ex = case2ex[case.name]
s = []
idx = []
for strrate in case.iterdir():
rate = float(strrate.name) # rate req/sec
for f in strrate.glob('*.*.*.*.output'):
latencies = parse_iterations(f)
s.append(latencies.Duration.mean())
idx.append(rate)
s = | pd.Series(s, idx) | pandas.Series |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
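# Added annotation: the tests below combine these "box" fixtures with
# tm.box_expected to run identical arithmetic assertions against Index, Series
# and DataFrame. A typical pattern (values are illustrative):
#
#   tdi = TimedeltaIndex(['1 day', '2 days'])
#   tdi = tm.box_expected(tdi, box)            # wrap in the parametrized container
#   expected = tm.box_expected(expected, box)
#   tm.assert_equal(tdi + other, expected)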
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = | tm.box_expected(tdi, box) | pandas.util.testing.box_expected |
"""
Name: diffusion_functions
Purpose: Contains functions to calculate diffusion of distributed wind model
(1) Determine maximum market size as a function of payback time;
(2) Parameterize Bass diffusion curve with diffusion rates (p, q) set by
payback time;
    (3) Determine current stage (equivalent time) of diffusion based on
        existing market and current economics; and
    (4) Calculate new market share by stepping forward on diffusion curve.
"""
import numpy as np
import pandas as pd
import config
import utility_functions as utilfunc
import decorators
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#=============================================================================
# ^^^^ Diffusion Calculator ^^^^
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_diffusion_solar(df, is_first_year, bass_params, year,
override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
"""
    Calculates the market share (ms) added in the solve year. Market share must be less
    than the max market share (mms), except when the initial ms is greater than the
    calculated mms. In that circumstance, no diffusion is allowed until mms > ms. Also,
    ms is not allowed to decrease if economics deteriorate. Using the calculated market
    share, relevant quantities are updated.
IN: df - pd dataframe - Main dataframe
OUT: df - pd dataframe - Main dataframe
market_last_year - pd dataframe - market to inform diffusion in next year
"""
df = df.reset_index()
bass_params = bass_params[bass_params['tech']=='solar']
# set p/q/teq_yr1 params
df = pd.merge(df, bass_params[['state_abbr', 'bass_param_p', 'bass_param_q', 'teq_yr1', 'sector_abbr']], how = 'left', on = ['state_abbr','sector_abbr'])
# calc diffusion market share
df = calc_diffusion_market_share(df, is_first_year)
# market share floor is based on last year's market share
df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])
# calculate the "new" market share (old - current)
df['new_market_share'] = df['market_share'] - df['market_share_last_year']
# cap the new_market_share where the market share exceeds the max market share
df['new_market_share'] = np.where(df['market_share'] > df['max_market_share'], 0, df['new_market_share'])
# calculate new adopters, capacity and market value
df['new_adopters'] = df['new_market_share'] * df['developable_agent_weight']
df['new_market_value'] = df['new_adopters'] * df['system_kw'] * df['system_capex_per_kw']
df['new_system_kw'] = df['new_adopters'] * df['system_kw']
df['new_batt_kw'] = df['new_adopters'] * df['batt_kw']
df['new_batt_kwh'] = df['new_adopters'] * df['batt_kwh']
# then add these values to values from last year to get cumulative values:
df['number_of_adopters'] = df['adopters_cum_last_year'] + df['new_adopters']
df['market_value'] = df['market_value_last_year'] + df['new_market_value']
df['system_kw_cum'] = df['system_kw_cum_last_year'] + df['new_system_kw']
df['batt_kw_cum'] = df['batt_kw_cum_last_year'] + df['new_batt_kw']
df['batt_kwh_cum'] = df['batt_kwh_cum_last_year'] + df['new_batt_kwh']
# constrain state-level capacity totals to known historical values
if year in (2014, 2016, 2018):
group_cols = ['state_abbr', 'sector_abbr', 'year']
state_capacity_total = (df[group_cols+['system_kw_cum', 'batt_kw_cum', 'batt_kwh_cum', 'agent_id']].groupby(group_cols)
.agg({'system_kw_cum':'sum', 'batt_kw_cum':'sum', 'batt_kwh_cum':'sum', 'agent_id':'count'})
.rename(columns={'system_kw_cum':'state_solar_kw_cum', 'batt_kw_cum':'state_batt_kw_cum', 'batt_kwh_cum':'state_batt_kwh_cum', 'agent_id':'agent_count'})
.reset_index())
# coerce dtypes
state_capacity_total.state_solar_kw_cum = state_capacity_total.state_solar_kw_cum.astype(np.float64)
state_capacity_total.state_batt_kw_cum = state_capacity_total.state_batt_kw_cum.astype(np.float64)
state_capacity_total.state_batt_kwh_cum = state_capacity_total.state_batt_kwh_cum.astype(np.float64)
df.system_kw_cum = df.system_kw_cum.astype(np.float64)
df.batt_kw_cum = df.batt_kw_cum.astype(np.float64)
df.batt_kwh_cum = df.batt_kwh_cum.astype(np.float64)
# merge state totals back to agent df
df = pd.merge(df, state_capacity_total, how = 'left', on = ['state_abbr', 'sector_abbr', 'year'])
# read csv of historical capacity values by state and sector
historical_state_df = pd.read_csv(config.OBSERVED_DEPLOYMENT_BY_STATE)
# join historical data to agent df
df = pd.merge(df, historical_state_df, how='left', on=['state_abbr', 'sector_abbr', 'year'])
# calculate scale factor - weight that is given to each agent based on proportion of state total
        # where state cumulative capacity is 0, apportion evenly across all agents
df['solar_scale_factor'] = np.where(df['state_solar_kw_cum'] == 0, 1.0/df['agent_count'], df['system_kw_cum'] / df['state_solar_kw_cum'])
df['batt_mw_scale_factor'] = np.where(df['state_batt_kw_cum'] == 0, 1.0/df['agent_count'], df['batt_kw_cum'] / df['state_batt_kw_cum'])
df['batt_mwh_scale_factor'] = np.where(df['state_batt_kwh_cum'] == 0, 1.0/df['agent_count'], df['batt_kwh_cum'] / df['state_batt_kwh_cum'])
# use scale factor to constrain agent capacity values to historical values
df['system_kw_cum'] = df['solar_scale_factor'] * df['observed_solar_mw'] * 1000.
df['batt_kw_cum'] = df['batt_mw_scale_factor'] * df['observed_storage_mw'] * 1000.
df['batt_kwh_cum'] = df['batt_mwh_scale_factor'] * df['observed_storage_mwh'] * 1000.
# recalculate number of adopters using anecdotal values
df['number_of_adopters'] = np.where(df['sector_abbr'] == 'res', df['system_kw_cum']/5.0, df['system_kw_cum']/100.0)
# recalculate market share
df['market_share'] = np.where(df['developable_agent_weight'] == 0, 0.0,
df['number_of_adopters'] / df['developable_agent_weight'])
df['market_share'] = df['market_share'].astype(np.float64)
df.drop(['agent_count',
'state_solar_kw_cum','state_batt_kw_cum','state_batt_kwh_cum',
'observed_solar_mw','observed_storage_mw','observed_storage_mwh',
'solar_scale_factor','batt_mw_scale_factor','batt_mwh_scale_factor'], axis=1, inplace=True)
market_last_year = df[['agent_id',
'market_share','max_market_share','number_of_adopters',
'market_value','initial_number_of_adopters','initial_pv_kw','initial_batt_kw','initial_batt_kwh',
'initial_market_share','initial_market_value',
'system_kw_cum','new_system_kw',
'batt_kw_cum','new_batt_kw',
'batt_kwh_cum','new_batt_kwh']]
market_last_year.rename(columns={'market_share':'market_share_last_year',
'max_market_share':'max_market_share_last_year',
'number_of_adopters':'adopters_cum_last_year',
'market_value': 'market_value_last_year',
'system_kw_cum':'system_kw_cum_last_year',
'batt_kw_cum':'batt_kw_cum_last_year',
'batt_kwh_cum':'batt_kwh_cum_last_year'}, inplace=True)
return df, market_last_year
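# Reference sketch (added, not part of the original module): the p/q values merged
# in above are Bass-diffusion innovation/imitation coefficients. For reference, the
# cumulative Bass adoption fraction at equivalent time t is:
def _bass_cumulative_adoption_example(p, q, t):
    """Illustrative only: F(t) = (1 - exp(-(p+q)*t)) / (1 + (q/p) * exp(-(p+q)*t))."""
    e = np.exp(-(p + q) * t)
    return (1.0 - e) / (1.0 + (q / p) * e)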
#=============================================================================
# ^^^^ Diffusion Calculator ^^^^
@decorators.fn_timer(logger = logger, tab_level = 3, prefix = '')
def calc_diffusion(df, cur, con, techs, choose_tech, sectors, schema, is_first_year,
bass_params, override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
"""
    Calculates the market share (ms) added in the solve year. Market share must be less
    than the max market share (mms), except when the initial ms is greater than the
    calculated mms. In that circumstance, no diffusion is allowed until mms > ms. Also,
    ms is not allowed to decrease if economics deteriorate. Using the calculated market
    share, relevant quantities are updated.
IN: df - pd dataframe - Main dataframe
OUT: df - pd dataframe - Main dataframe
market_last_year - pd dataframe - market to inform diffusion in next year
"""
logger.info("\t\tCalculating Diffusion")
# set p/q/teq_yr1 params
df = set_bass_param(df, bass_params, override_p_value, override_q_value, override_teq_yr1_value)
# calc diffusion market share
df = calc_diffusion_market_share(df, is_first_year)
# ensure no diffusion for non-selected options
df['diffusion_market_share'] = df['diffusion_market_share'] * df['selected_option']
# market share floor is based on last year's market share
df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])
# if in tech choice mode, ensure that total market share doesn't exceed 1
if choose_tech == True:
# extract out the rows for unselected technologies
market_share_cap = df[df['selected_option'] == False][['county_id', 'bin_id', 'sector_abbr', 'market_share']].groupby(['county_id', 'bin_id', 'sector_abbr']).sum().reset_index()
# determine how much market share is allowable based on 1 - the MS of the unselected techs
market_share_cap['market_share_cap'] = 1 - market_share_cap['market_share']
# drop the market share column
market_share_cap.drop('market_share', inplace = True, axis = 1)
# merge to df
df = | pd.merge(df, market_share_cap, how = 'left', on = ['county_id', 'bin_id', 'sector_abbr']) | pandas.merge |
from __future__ import print_function
import os, sys, pwd, json, pandas as pd, numpy as np, sqlite3, pwd, uuid, platform, re, base64, string,enum,shelve
import matplotlib as mpl
import matplotlib.cm
import requests
from datetime import datetime as timr
from rich import print as outy
from sqlite3 import connect
from glob import glob
from copy import deepcopy as dc
import functools
import httplib2
import six
from waybackpy import WaybackMachineSaveAPI as checkpoint
from threading import Thread, Lock
from six.moves.urllib.parse import urlencode
if six.PY2:
from string import maketrans
else:
maketrans = bytes.maketrans
from difflib import SequenceMatcher
from sqlalchemy import create_engine
import pandas as pd
import psutil
import time
from telegram import Update, ForceReply, Bot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
from github import Github
import base64
from cryptography.fernet import Fernet
def flatten_list(lyst: list) -> list:
if not lyst:
return []
big_list = len(lyst) > 1
if isinstance(lyst[0], list):
return flatten_list(lyst[0]) + (big_list * flatten_list(lyst[1:]))
else:
return [lyst[0]] + (big_list * flatten_list(lyst[1:]))
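# Example (illustrative): flatten_list([1, [2, [3, 4]], 5]) -> [1, 2, 3, 4, 5],
# and flatten_list([]) -> [].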
def json_set_check(obj):
"""
json.dump(X,default=json_set_check)
https://stackoverflow.com/questions/22281059/set-object-is-not-json-serializable
"""
if isinstance(obj, set):
return list(obj)
raise TypeError
def live_link(url:str):
response = False
try:
response_type = requests.get(url)
response = response_type.status_code < 400
time.sleep(2)
except:
pass
return response
def save_link(url:str):
save_url = None
if live_link(url):
saver = checkpoint(url, user_agent="Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0")
try:
save_url = saver.save()
time.sleep(10)
if save_url is None:
save_url = saver.saved_archive
except Exception as e:
print(f"Issue with saving the link {url}: {e}")
pass
return save_url
def zip_from_archive(url:str, file_name:str="tmp.zip"):
if not file_name.endswith(".zip"):
file_name += ".zip"
if "web.archive.org" in url and live_link(url):
try:
new_url = url.replace('/https://','if_/https://')
req = requests.get(new_url)
open(file_name,"wb").write(req.content)
except Exception as e:
print(f"Exception :> {e}")
return file_name
def str_to_base64(string, password:bool=False, encoding:str='utf-8'):
    current = base64.b64encode(string.encode(encoding))
    key = None  # remains None when no password/encryption is requested
    if password:
        key = Fernet.generate_key()
        current = Fernet(key).encrypt(current)
        key = key.decode(encoding)
    return (current.decode(encoding), key)
def base64_to_str(b64, password:str=None, encoding:str='utf-8'):
    current = None
    if password:
        current = Fernet(password.encode(encoding)).decrypt(b64.encode(encoding)).decode(encoding)
    return base64.b64decode(current or b64).decode(encoding)
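# Example round trip (illustrative): str_to_base64 returns (encoded, key), where
# key is None unless password=True; the key must then be handed back to
# base64_to_str to decrypt.
#
#   encoded, key = str_to_base64("hello")                # key is None
#   base64_to_str(encoded)                               # -> "hello"
#   encoded, key = str_to_base64("hello", password=True)
#   base64_to_str(encoded, password=key)                 # -> "hello"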
def silent_exec(default=None, returnException:bool=False):
"""
https://stackoverflow.com/questions/39905390/how-to-auto-wrap-function-call-in-try-catch
Usage: @silent_exec()
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
return e if returnException else default
return wrapper
return decorator
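# Example usage (illustrative):
#
#   @silent_exec(default=-1)
#   def risky_parse(s):
#       return int(s)
#
#   risky_parse("3")     # -> 3
#   risky_parse("oops")  # -> -1; pass returnException=True to get the exception
#                        #    object back instead of the default.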
def write_shelf(shelf_name, print_out:bool=False):
with shelve.open(shelf_name, 'n') as shelf:
        for key in list(globals().keys()):
try:
shelf[key] = globals()[key]
except TypeError:
print(f"Error shelving the key {key} due to a type error")
except Exception as e:
print(f"Error shelving the key {key} due to {e}")
if print_out:
print(f"The shelf has been written")
def load_shelf(shelf_name, print_out:bool=False):
with shelve.open(shelf_name) as shelf:
for key in shelf:
if print_out:
print(f"Loading the shelf item {key}")
globals()[key] = shelf[key]
if print_out:
print(f"The shelf has been loaded")
def install_import(importname):
os.system(f"{sys.executable} -m pip install {importname} --upgrade")
def user():
return str(pwd.getpwuid(os.getuid())[0]).strip().lower()
percent = lambda x,y: ("{0:.2f}").format(100 * (x / float(y)))
cur_time = str(timr.now().strftime('%Y_%m_%d-%H_%M'))
rnd = lambda _input: f"{round(_input * 100)} %"
similar = lambda x,y:SequenceMatcher(None, x, y).ratio()*100
file_by_type = lambda PATH,ext:[os.path.join(dp, f) for dp, dn, filenames in os.walk(PATH) for f in filenames if os.path.splitext(f)[1] == ext]
file_by_name = lambda PATH,name:[os.path.join(dp, f) for dp, dn, filenames in os.walk(PATH) for f in filenames if f == name]
of_dir = lambda PATH,name:[os.path.join(dp, d) for dp, dn, filenames in os.walk(PATH) for d in dn if d == name]
def metrics(TP,FP,TN,FN, use_percent:bool=False):
div = lambda x,y:x/y if y else 0
prep = lambda x:percent(x, 100) if use_percent else x
precision, recall = div(TP , (TP + FP)), div(TP , (TP + FN))
return {
'TP': TP,
'FP': FP,
'TN': TN,
'FN': FN,
'Precision_PPV': prep(precision),
'Recall': prep(recall),
'Specificity_TNR': prep(div(TN , (TN + FP))),
'FNR': prep(div(FN , (FN + TP))),
'FPR': prep(div(FP , (FP + TN))),
'FDR': prep(div(FP , (FP + TP))),
'FOR': prep(div(FN , (FN + TN))),
'TS': prep(div(TP , (TP + FN + FP))),
'Accuracy': prep(div((TP + TN) , (TP + TN + FP + FN))),
'PPCR': prep(div((TP + FP) , (TP + TN + FP + FN))),
'F1': prep(2 * div( (precision * recall),(precision + recall) )),
}
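# Hedged usage sketch (illustrative only): derive the standard rates from raw
# confusion-matrix counts.
def _example_metrics():
    scores = metrics(TP=40, FP=10, TN=45, FN=5)
    # Precision = 40/50 = 0.8, Recall = 40/45 ~ 0.889, Accuracy = 85/100 = 0.85
    return scores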
def add_metrics(fwame, TP:str='TP',FP:str='FP',TN:str='TN',FN:str='FN', use_percent:bool=False):
div = lambda x,y:x/y if y else 0
prep = lambda x:percent(x, 100) if use_percent else x
fwame['Precision_PPV'] = prep(fwame[TP]/(fwame[TP]+fwame[FP]))
fwame['Recall'] = prep(fwame[TP]/(fwame[TP]+fwame[FN]))
fwame['Specificity_TNR'] = prep(fwame[TN]/(fwame[TN]+fwame[FP]))
fwame['FNR'] = prep(fwame[FN]/(fwame[FN]+fwame[TP]))
fwame['FPR'] = prep(fwame[FP]/(fwame[FP]+fwame[TN]))
fwame['FDR'] = prep(fwame[FP]/(fwame[FP]+fwame[TP]))
fwame['FOR'] = prep(fwame[FN]/(fwame[FN]+fwame[TN]))
fwame['TS'] = prep(fwame[TP]/(fwame[TP]+fwame[FP]+fwame[FN]))
fwame['Accuracy'] = prep((fwame[TP]+fwame[TN])/(fwame[TP]+fwame[FP]+fwame[TN]+fwame[FN]))
fwame['PPCR'] = prep((fwame[TP]+fwame[FP])/(fwame[TP]+fwame[FP]+fwame[TN]+fwame[FN]))
fwame['F1'] = prep(2 * ((fwame['Precision_PPV'] * fwame['Recall'])/(fwame['Precision_PPV'] + fwame['Recall'])))
return fwame
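# Hedged usage sketch (illustrative only): append the derived metric columns to
# a DataFrame that already holds per-row TP/FP/TN/FN counts.
def _example_add_metrics():
    counts = pd.DataFrame({'TP': [40, 30], 'FP': [10, 5], 'TN': [45, 60], 'FN': [5, 5]})
    return add_metrics(counts)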
def compare_dicts(raw_dyct_one, raw_dyct_two):
one,two = dc(raw_dyct_one),dc(raw_dyct_two)
for dyct in [one,two]:
for key in list(dyct.keys()):
if from_nan(dyct[key]) == None:
dyct[key] = np.nan
return set(one.items()) ^ set(two.items())
diff_lists = lambda one,two: set(one) ^ set(two)
same_dicts = lambda dyct_one, dyct_two: compare_dicts(dyct_one, dyct_two) == set()
def contains_dict(list_dicts, current_dict):
for dyct in list_dicts:
if same_dicts(dyct, current_dict):
return True
return False
def frame_dycts(frame):
"""
output = []
for row in frame.itertuples():
output += [row._asdict()]
return output
"""
return frame.to_dict('records')
def pd_to_arr(frame):
return frame_dycts(frame)
def dyct_frame(raw_dyct,deepcopy:bool=True):
dyct = dc(raw_dyct) if deepcopy else raw_dyct
for key in list(raw_dyct.keys()):
dyct[key] = [dyct[key]]
    return pd.DataFrame.from_dict(dyct)
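# Hedged sketch (illustrative only, assumes the module's dc/deepcopy helper used
# above is available): a dict of scalars becomes a one-row frame, and pd_to_arr
# converts it back to a list of record dicts.
def _example_dyct_roundtrip():
    frame = dyct_frame({'a': 1, 'b': 'x'})
    assert pd_to_arr(frame) == [{'a': 1, 'b': 'x'}]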
############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#################################################################################################
import pandas as pd
import numpy as np
from pathlib import Path
import os
#### The warnings from Sklearn are so annoying that I have to shut it off ####
import warnings
warnings.filterwarnings("ignore")
def warn(*args, **kwargs):
pass
warnings.warn = warn
########################################
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
####################################################################################
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# from matplotlib import io
import io
import seaborn as sns
sns.set(style="whitegrid", color_codes=True)
import re
import pdb
import pprint
import matplotlib
matplotlib.style.use('seaborn')
from itertools import cycle, combinations
from collections import defaultdict
import copy
import time
import sys
import random
import xlrd
import statsmodels
from io import BytesIO
import base64
from functools import reduce
import traceback
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import train_test_split
######## This is where we import HoloViews related libraries #########
import hvplot.pandas
import holoviews as hv
from holoviews import opts
#hv.notebook_extension('bokeh')
hv.extension('bokeh', 'matplotlib')
#hv.extension('bokeh')
import panel as pn
import panel.widgets as pnw
import holoviews.plotting.bokeh
######## This is where we store the image data in a dictionary with a list of images #########
def save_image_data(fig, chart_format, plot_name, depVar, mk_dir, additional=''):
if not os.path.isdir(mk_dir):
os.mkdir(mk_dir)
if additional == '':
filename = os.path.join(mk_dir,plot_name+"."+chart_format)
else:
filename = os.path.join(mk_dir,plot_name+additional+"."+chart_format)
##################################################################################
if chart_format == 'svg':
###### You have to add these lines to each function that creates charts currently ##
imgdata = io.StringIO()
fig.savefig(filename, dpi='figure', format=chart_format)
imgdata.seek(0)
svg_data = imgdata.getvalue()
return svg_data
else:
### You have to do it slightly differently for PNG and JPEG formats
imgdata = BytesIO()
fig.savefig(filename,format=chart_format, dpi='figure')
#fig.savefig(imgdata, format=chart_format, bbox_inches='tight', pad_inches=0.0)
imgdata.seek(0)
figdata_png = base64.b64encode(imgdata.getvalue())
return figdata_png
def save_html_data(hv_all, chart_format, plot_name, mk_dir, additional=''):
print('Saving %s in HTML format' %(plot_name+additional))
if not os.path.isdir(mk_dir):
os.mkdir(mk_dir)
if additional == '':
filename = os.path.join(mk_dir,plot_name+"."+chart_format)
else:
filename = os.path.join(mk_dir,plot_name+additional+"."+chart_format)
pn.panel(hv_all).save(filename, embed=True) ## it is amazing you can save interactive plots ##
#### This module analyzes a dependent Variable and finds out whether it is a
#### Regression or Classification type problem
def analyze_problem_type(train, target, verbose=0) :
target = copy.deepcopy(target)
cat_limit = 30 ### this determines the number of categories to name integers as classification ##
float_limit = 15 ### this limits the number of float variable categories for it to become cat var
if isinstance(target, str):
target = [target]
if len(target) == 1:
targ = target[0]
else:
targ = target[0]
#### This is where you detect what kind of problem it is #################
if train[targ].dtype in ['int64', 'int32','int16']:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 2 and len(train[targ].unique()) <= cat_limit:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype in ['float16','float32','float64']:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 2 and len(train[targ].unique()) <= float_limit:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
########### print this for the start of next step ###########
if verbose <= 2:
print('''\n################ %s VISUALIZATION Started #####################''' %model_class)
return model_class
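# Hedged usage sketch (illustrative only): a target column holding two distinct
# integer values is reported as 'Binary_Classification'.
def _example_analyze_problem_type():
    demo = pd.DataFrame({'target': [0, 1, 0, 1], 'x': [1.0, 2.0, 3.0, 4.0]})
    return analyze_problem_type(demo, 'target', verbose=0)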
#################################################################################
# Pivot Tables are generally meant for Categorical Variables on the axes
# and a Numeric Column (typically the Dep Var) as the "Value", aggregated by its mean.
# Let's do some pivot tables to capture some meaningful insights
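# Hedged illustration (not part of the original code): the pandas call the
# plotting function below builds on -- the mean of a numeric column split by two
# categorical columns (pd.pivot_table aggregates by mean by default).
def _example_pivot_table():
    demo = pd.DataFrame({'cat1': ['a', 'a', 'b', 'b'],
                         'cat2': ['x', 'y', 'x', 'y'],
                         'price': [10, 20, 30, 40]})
    return pd.pivot_table(demo, values='price', index='cat1', columns='cat2')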
import random
def draw_pivot_tables(dft,cats,nums,problem_type,verbose,chart_format,depVar='', classes=None, mk_dir=None):
plot_name = 'Bar_Plots_Pivots'
cats = list(set(cats))
dft = dft[:]
cols = 2
cmap = plt.get_cmap('jet')
#### For some reason, the cmap colors are not working #########################
colors = cmap(np.linspace(0, 1, len(cats)))
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgkbyr')
#colormaps = ['summer', 'rainbow','viridis','inferno','magma','jet','plasma']
colormaps = ['Greys','Blues','Greens','GnBu','PuBu',
'YlGnBu','PuBuGn','BuGn','YlGn']
#colormaps = ['Purples','Oranges','Reds','YlOrBr',
# 'YlOrRd','OrRd','PuRd','RdPu','BuPu',]
N = len(cats)
if N==0:
print('No categorical or boolean vars in data set. Hence no pivot plots...')
return None
noplots = copy.deepcopy(N)
#### You can set the number of subplots per row and the number of categories to display here cols = 2
displaylimit = 20
categorylimit = 10
imgdata_list = []
width_size = 15
height_size = 5
stringlimit = 20
combos = combinations(cats, 2)
N = len(cats)
if N <= 1:
### if there are not many categorical variables, there is nothing to plot
return imgdata_list
if len(nums) == 0:
### if there are no numeric variables, there is nothing to plot
return imgdata_list
    if depVar is not None and depVar != '' and depVar != []:
########### This works equally well for classification as well as Regression ###
lst=[]
noplots=int((N**2-N)/2)
dicti = {}
counter = 1
cols = 2
        if noplots%cols == 0:
            if noplots == 0:
                rows = 1
            else:
                rows = int(noplots/cols)
        else:
            rows = int(noplots/cols)+1
#fig = plt.figure(figsize=(min(20,N*10),rows*5))
fig = plt.figure()
if cols < 2:
fig.set_size_inches(min(15,8),rows*height_size)
fig.subplots_adjust(hspace=0.5) ### This controls the space betwen rows
fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
else:
fig.set_size_inches(min(cols*10,20),rows*height_size)
fig.subplots_adjust(hspace=0.5) ### This controls the space betwen rows
fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
for (var1, var2) in combos:
color1 = random.choice(colormaps)
data = pd.DataFrame(dicti)
x=dft[var1]
y=dft[var2]
ax1 = fig.add_subplot(rows,cols,counter)
nocats = min(categorylimit,dft[var1].nunique())
nocats1 = min(categorylimit,dft[var2].nunique())
if dft[depVar].dtype==object or dft[depVar].dtype==bool:
dft[depVar] = dft[depVar].factorize()[0]
data = pd.pivot_table(dft,values=depVar,index=var1, columns=var2).head(nocats)
data = data[data.columns[:nocats1]] #### make sure you don't print more than 10 rows of data
data.plot(kind='bar',ax=ax1,colormap=color1)
ax1.set_xlabel(var1)
ax1.set_ylabel(depVar)
if dft[var1].dtype == object or str(dft[var1].dtype) == 'category':
labels = data.index.str[:stringlimit].tolist()
else:
labels = data.index.tolist()
ax1.set_xticklabels(labels,fontdict={'fontsize':10}, rotation = 45, ha="right")
ax1.legend(fontsize="medium")
ax1.set_title('%s (Mean) by %s and %s' %(depVar,var1,var2),fontsize=12)
counter += 1
fig.tight_layout()
fig.suptitle('Pivot Tables of each Continuous var by 2 Categoricals', fontsize=15,y=1.01);
else:
print('No pivot tables plotted since no dependent variable given as input')
image_count = 0
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, depVar, mk_dir))
image_count += 1
if verbose <= 1:
plt.show();
####### End of Pivot Plotting #############################
return imgdata_list
# SCATTER PLOTS ARE USEFUL FOR COMPARING NUMERIC VARIABLES
def draw_scatters(dfin,nums,verbose,chart_format,problem_type,dep=None, classes=None, lowess=False, mk_dir=None):
plot_name = 'Scatter_Plots'
dft = dfin[:]
##### we are going to modify dfin and classes, so we are making copies to make changes
classes = copy.deepcopy(classes)
colortext = 'brymcgkbyrcmgkbyrcmgkbyrcmgkbyr'
if len(classes) == 0:
leng = len(nums)
else:
leng = len(classes)
colors = cycle(colortext[:leng])
#imgdata_list = defaultdict(list)
imgdata_list = []
if dfin.shape[0] >= 10000 or lowess == False:
lowess = False
x_est = None
transparent = 0.6
bubble_size = 80
else:
if verbose <= 1:
print('Using Lowess Smoothing. This might take a few minutes for large data sets...')
lowess = True
x_est = None
transparent = 0.6
bubble_size = 100
if verbose <= 1:
x_est = np.mean
N = len(nums)
cols = 2
width_size = 15
height_size = 4
if dep == None or dep == '':
### when there is no dependent variable, you can't plot anything in scatters here ###
return None
elif problem_type == 'Regression':
image_count = 0
####### This is a Regression Problem so it requires 2 steps ####
####### First, plot every Independent variable against the Dependent Variable ###
noplots = len(nums)
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
for num, plotcounter, color_val in zip(nums, range(1,noplots+1), colors):
### Be very careful with the next line. It should be singular "subplot" ##
##### Otherwise, if you use the plural version "subplots" it has a different meaning!
plt.subplot(rows,cols,plotcounter)
if lowess:
sns.regplot(x=dft[num], y = dft[dep], lowess=lowess, color=color_val, ax=plt.gca())
else:
sns.scatterplot(x=dft[num], y=dft[dep], ax=plt.gca(), palette='dark',color=color_val)
plt.xlabel(num)
plt.ylabel(dep)
fig.suptitle('Scatter Plot of each Continuous Variable vs Target',fontsize=15,y=1.01)
fig.tight_layout();
if verbose <= 1:
plt.show();
#### Keep it at the figure level###
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
else:
####### This is a Classification Problem #### You need to plot a strip plot ####
####### First, Plot each Continuous variable against the Target Variable ###
if len(dft) < 1000:
jitter = 0.05
else:
jitter = 0.5
image_count = 0
noplots = len(nums)
rows = int((noplots/cols)+0.99)
### Be very careful with the next line. we have used the singular "subplot" ##
fig = plt.figure(figsize=(width_size,rows*height_size))
for num, plotc, color_val in zip(nums, range(1,noplots+1),colors):
####Strip plots are meant for categorical plots so x axis must always be depVar ##
plt.subplot(rows,cols,plotc)
sns.stripplot(x=dft[dep], y=dft[num], ax=plt.gca(), jitter=jitter)
plt.suptitle('Scatter Plot of Continuous Variable vs Target (jitter=%0.2f)' %jitter, fontsize=15,y=1.01)
fig.tight_layout();
if verbose <= 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
####### End of Scatter Plots ######
return imgdata_list
# PAIR SCATTER PLOTS ARE NEEDED ONLY FOR CLASSIFICATION PROBLEMS IN NUMERIC VARIABLES
def draw_pair_scatters(dfin,nums,problem_type, verbose,chart_format, dep=None, classes=None, lowess=False, mk_dir=None):
"""
### This is where you plot a pair-wise scatter plot of Independent Variables against each other####
"""
plot_name = 'Pair_Scatter_Plots'
dft = dfin[:]
if len(nums) <= 1:
return
classes = copy.deepcopy(classes)
cols = 2
colortext = 'brymcgkbyrcmgkbyrcmgkbyrcmgkbyr'
colors = cycle(colortext)
imgdata_list = list()
width_size = 15
height_size = 4
N = len(nums)
if dfin.shape[0] >= 10000 or lowess == False:
x_est = None
transparent =0.7
bubble_size = 80
elif lowess:
print('Using Lowess Smoothing. This might take a few minutes for large data sets...')
x_est = None
transparent =0.7
bubble_size = 100
else:
x_est = None
transparent =0.7
bubble_size = 100
if verbose <= 1:
x_est = np.mean
if problem_type == 'Regression' or problem_type == 'Clustering':
image_count = 0
### Second, plot a pair-wise scatter plot of Independent Variables against each other####
combos = combinations(nums, 2)
noplots = int((N**2-N)/2)
print('Number of All Scatter Plots = %d' %(noplots+N))
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
for (var1,var2), plotcounter,color_val in zip(combos, range(1,noplots+1),colors):
### Be very careful with the next line. It should be singular "subplot" ##
##### Otherwise, if you use the plural version "subplots" it has a different meaning!
plt.subplot(rows,cols,plotcounter)
if lowess:
sns.regplot(x=dft[var1], y=dft[var2], lowess=lowess, color=color_val, ax=plt.gca())
else:
sns.scatterplot(x=dft[var1], y=dft[var2], ax=plt.gca(), palette='dark',color=color_val)
plt.xlabel(var1)
plt.ylabel(var2)
fig.suptitle('Pair-wise Scatter Plot of all Continuous Variables', fontsize=15,y=1.01)
fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
if verbose <= 1:
plt.show();
else:
########## This is for Classification problems ##########
if len(classes) <= 1:
leng = 1
else:
leng = len(classes)
colors = cycle(colortext[:leng])
image_count = 0
#cmap = plt.get_cmap('gnuplot')
#cmap = plt.get_cmap('Set1')
cmap = plt.get_cmap('Paired')
combos = combinations(nums, 2)
combos_cycle = cycle(combos)
noplots = int((N**2-N)/2)
print('Total Number of Scatter Plots = %d' %(noplots+N))
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
### Be very careful with the next line. we have used the plural "subplots" ##
## In this case, you have ax as an array and you have to use (row,col) to get each ax!
target_vars = dft[dep].unique()
number = len(target_vars)
#colors = [cmap(i) for i in np.linspace(0, 1, number)]
for (var1,var2), plotc in zip(combos, range(1,noplots+1)):
for target_var, color_val, class_label in zip(target_vars, colors, classes):
#Fix color in all scatter plots for each class the same using this trick
color_array = np.empty(0)
value = dft[dep]==target_var
dft['color'] = np.where(value==True, color_val, 'r')
color_array = np.hstack((color_array, dft[dft['color']==color_val]['color'].values))
plt.subplot(rows, cols, plotc)
plt.scatter(x=dft.loc[dft[dep]==target_var][var1], y=dft.loc[dft[dep]==target_var][var2],
label=class_label, color=color_val, alpha=transparent)
plt.xlabel(var1)
plt.ylabel(var2)
plt.legend()
fig.suptitle('Pair-wise Scatter Plot of all Continuous Variables',fontsize=15,y=1.01)
#fig.tight_layout();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
if verbose <= 1:
plt.show();
####### End of Pair Scatter Plots ######
return imgdata_list
#Bar Plots are for 2 Categoricals and One Numeric (usually Dep Var)
def plot_fast_average_num_by_cat(dft, cats, num_vars, verbose=0,kind="bar"):
"""
Great way to plot continuous variables fast grouped by a categorical variable. Just sent them in and it will take care of the rest!
"""
#if verbose <= 1:
# ipython inline magic shouldn't be needed because all plots are
# being displayed with plt.show() calls
#get_ipython().magic('matplotlib inline')
chunksize = 20
stringlimit = 20
col = 2
width_size = 15
height_size = 4
N = int(len(num_vars)*len(cats))
colors = cycle('byrcmgkbyrcmgkbyrcmgkbyrcmgk')
if N % 2 == 0:
row = N//col
else:
row = int(N//col + 1)
fig = plt.figure()
if kind == 'bar':
fig.suptitle('Bar plots for each Continuous by each Categorical variable', fontsize=15,y=1.01)
else:
fig.suptitle('Time Series plots for all date-time vars %s' %cats, fontsize=15,y=1.01)
if col < 2:
fig.set_size_inches(min(15,8),row*5)
fig.subplots_adjust(hspace=0.5) ### This controls the space betwen rows
fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
else:
fig.set_size_inches(min(col*10,20),row*5)
fig.subplots_adjust(hspace=0.5) ### This controls the space betwen rows
fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
counter = 1
for cat in cats:
for each_conti in num_vars:
color3 = next(colors)
try:
ax1 = plt.subplot(row, col, counter)
if kind == "bar":
data = dft.groupby(cat)[each_conti].mean().sort_values(
ascending=False).head(chunksize)
data.plot(kind=kind,ax=ax1,color=color3)
elif kind == "line":
data = dft.groupby(cat)[each_conti].mean().sort_index(
ascending=True).head(chunksize)
data.plot(kind=kind,ax=ax1,color=color3)
if dft[cat].dtype == object or str(dft[cat].dtype) == 'category':
labels = data.index.str[:stringlimit].tolist()
else:
labels = data.index.tolist()
ax1.set_xlabel("")
ax1.set_xticklabels(labels,fontdict={'fontsize':9}, rotation = 45, ha="right")
ax1.set_title('Average %s by %s (Top %d)' %(each_conti,cat,chunksize))
counter += 1
except:
ax1.set_title('No plot as %s is not numeric' %each_conti)
counter += 1
if verbose <= 1:
plt.show()
if verbose == 2:
return fig
################# The barplots module below calls the plot_fast_average_num_by_cat module above ###
def draw_barplots(dft,cats,conti,problem_type,verbose,chart_format,dep='', classes=None, mk_dir=None):
cats = cats[:]
conti = conti[:]
plot_name = 'Bar_Plots'
#### Category limit within a variable ###
#### Remove Floating Point Categorical Vars from this list since they Error when Bar Plots are drawn
cats = [x for x in cats if dft[x].dtype != float]
dft = dft[:]
N = len(cats)
if len(cats) == 0 or len(conti) == 0:
print('No categorical or numeric vars in data set. Hence no bar charts.')
return None
cmap = plt.get_cmap('jet')
### Not sure why the cmap doesn't work and gives an error in some cases #################
colors = cmap(np.linspace(0, 1, len(conti)))
colors = cycle('gkbyrcmgkbyrcmgkbyrcmgkbyr')
colormaps = ['plasma','viridis','inferno','magma']
imgdata_list = list()
cat_limit = 10
conti = list_difference(conti,dep)
#### Make sure that you plot charts for the depVar as well by including it #######
if problem_type == 'Regression':
conti.append(dep)
elif problem_type.endswith('Classification'):
cats.append(dep)
else:
### Since there is no dependent variable in clustering there is nothing to add dep to.
pass
chunksize = 20
########## This is for Regression Problems only ######
image_count = 0
figx = plot_fast_average_num_by_cat(dft, cats, conti, verbose)
if verbose == 2:
imgdata_list.append(save_image_data(figx, chart_format,
plot_name, dep, mk_dir))
image_count += 1
return imgdata_list
############## End of Bar Plotting ##########################################
##### Draw a Heatmap using Pearson Correlation #########################################
def draw_heatmap(dft, conti, verbose,chart_format,datevars=[], dep=None,
modeltype='Regression',classes=None, mk_dir=None):
### Test if this is a time series data set, then differene the continuous vars to find
### if they have true correlation to Dependent Var. Otherwise, leave them as is
plot_name = 'Heat_Maps'
width_size = 3
height_size = 2
timeseries_flag = False
if len(conti) <= 1:
return
if isinstance(dft.index, pd.DatetimeIndex) :
dft = dft[:]
timeseries_flag = True
pass
elif len(datevars) > 0:
dft = dft[:]
try:
dft.index = pd.to_datetime(dft.pop(datevars[0]),infer_datetime_format=True)
timeseries_flag = True
except:
if verbose >= 1 and len(datevars) > 0:
print('No date vars could be found or %s could not be indexed.' %datevars)
elif verbose >= 1 and len(datevars) == 0:
print('No date vars could be found in data set')
timeseries_flag = False
# Add a column: the color depends on target variable but you can use whatever function
imgdata_list = list()
if modeltype.endswith('Classification'):
########## This is for Classification problems only ###########
if dft[dep].dtype == object or dft[dep].dtype == np.int64:
dft[dep] = dft[dep].factorize()[0]
image_count = 0
N = len(conti)
target_vars = dft[dep].unique()
fig = plt.figure(figsize=(min(N*width_size,20),min(N*height_size,20)))
if timeseries_flag:
fig.suptitle('Time Series: Heatmap of all Differenced Continuous vars for target = %s' %dep, fontsize=15,y=1.01)
else:
fig.suptitle('Heatmap of all Continuous Variables for target = %s' %dep, fontsize=15,y=1.01)
plotc = 1
#rows = len(target_vars)
rows = 1
cols = 1
if timeseries_flag:
dft_target = dft[[dep]+conti].diff()
else:
dft_target = dft[:]
dft_target[dep] = dft[dep].values
corr = dft_target.corr()
plt.subplot(rows, cols, plotc)
ax1 = plt.gca()
sns.heatmap(corr, annot=True,ax=ax1)
plotc += 1
fig.tight_layout();
if verbose <= 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
else:
### This is for Regression and None Dep variable problems only ##
image_count = 0
if dep == None or dep == '':
pass
else:
conti += [dep]
dft_target = dft[conti]
if timeseries_flag:
dft_target = dft_target.diff().dropna()
else:
dft_target = dft_target[:]
N = len(conti)
fig = plt.figure(figsize=(min(20,N*width_size),min(20,N*height_size)))
corr = dft_target.corr()
sns.heatmap(corr, annot=True)
if timeseries_flag:
fig.suptitle('Time Series Data: Heatmap of Differenced Continuous vars including target = %s' %dep, fontsize=15,y=1.01)
else:
fig.suptitle('Heatmap of all Continuous Variables including target = %s' %dep,fontsize=15,y=1.01)
fig.tight_layout();
if verbose <= 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
return imgdata_list
############# End of Heat Maps ##############
##### Draw the Distribution of each variable using Distplot
##### Must do this only for Continuous Variables
from scipy.stats import probplot,skew
def draw_distplot(dft, cat_bools, conti, verbose,chart_format,problem_type,dep=None, classes=None, mk_dir=None):
cats = find_remove_duplicates(cat_bools) ### first make sure there are no duplicates in this ###
copy_cats = copy.deepcopy(cats)
plot_name = 'Dist_Plots'
#### Since we are making changes to dft and classes, we will be making copies of it here
conti = list(set(conti))
dft = dft[:]
classes = copy.deepcopy(classes)
colors = cycle('brycgkbyrcmgkbyrcmgkbyrcmgkbyr')
imgdata_list = list()
width_size = 15 #### this is to control the width of chart as well as number of categories to display
height_size = 5
gap = 0.4 #### This controls the space between rows ######
if dep==None or dep=='' or problem_type == 'Regression':
image_count = 0
transparent = 0.7
######### This is for cases where there is No Target or Dependent Variable ########
if problem_type == 'Regression':
if isinstance(dep,list):
conti += dep
else:
conti += [dep]
### Be very careful with the next line. we have used the plural "subplots" ##
## In this case, you have ax as an array and you have to use (row,col) to get each ax!
########## This is where you insert the logic for displots ##############
sns.color_palette("Set1")
##### First draw all the numeric variables in row after row #############
if not len(conti) == 0:
cols = 3
rows = len(conti)
fig, axes = plt.subplots(rows, cols, figsize=(width_size,rows*height_size))
k = 1
for each_conti in conti:
color1 = next(colors)
ax1 = plt.subplot(rows, cols, k)
sns.distplot(dft[each_conti],kde=False, ax=ax1, color=color1)
k += 1
ax2 = plt.subplot(rows, cols, k)
sns.boxplot(dft[each_conti], ax=ax2, color=color1)
k += 1
ax3 = plt.subplot(rows, cols, k)
probplot(dft[each_conti], plot=ax3)
k += 1
skew_val=round(dft[each_conti].skew(), 1)
ax2.set_yticklabels([])
ax2.set_yticks([])
ax1.set_title(each_conti + " | Distplot")
ax2.set_title(each_conti + " | Boxplot")
ax3.set_title(each_conti + " | Probability Plot - Skew: "+str(skew_val))
###### Save the plots to disk if verbose = 2 ############
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name+'_Numeric', dep, mk_dir))
image_count += 1
##### Now draw each of the categorical variable distributions in each subplot ####
if not len(cats) == 0:
cols = 2
noplots = len(cats)
rows = int((noplots/cols)+0.99 )
k = 0
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.subplots_adjust(hspace=gap) ### This controls the space betwen rows
for each_cat in copy_cats:
color2 = next(colors)
ax1 = plt.subplot(rows, cols, k+1)
kwds = {"rotation": 45, "ha":"right"}
labels = dft[each_cat].value_counts()[:width_size].index.tolist()
dft[each_cat].value_counts()[:width_size].plot(kind='bar', color=color2,
ax=ax1,label='%s' %each_cat)
ax1.set_xticklabels(labels,**kwds);
                ax1.set_title('Distribution of %s (top %d categories only)' %(each_cat,width_size))
                k += 1
fig.tight_layout();
########## This is where you end the logic for distplots ################
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name+'_Cats', dep, mk_dir))
image_count += 1
fig.suptitle('Histograms (KDE plots) of all Continuous Variables', fontsize=12,y=1.01)
if verbose <= 1:
plt.show();
else:
######### This is for Classification problems only ########
#### Now you can draw both object and numeric variables using same conti_
conti = conti + cats
cols = 2
image_count = 0
transparent = 0.7
noplots = len(conti)
binsize = 30
k = 0
rows = int((noplots/cols)+0.99 )
### Be very careful with the next line. we have used the plural "subplots" ##
## In this case, you have ax as an array and you have to use (row,col) to get each ax!
fig = plt.figure(figsize=(width_size,rows*height_size))
target_vars = dft[dep].unique()
if type(classes[0])==int:
classes = [str(x) for x in classes]
label_limit = len(target_vars)
legend_flag = 1
for each_conti,k in zip(conti,range(len(conti))):
if dft[each_conti].isnull().sum() > 0:
dft[each_conti].fillna(0, inplace=True)
plt.subplot(rows, cols, k+1)
ax1 = plt.gca()
if dft[each_conti].dtype==object:
kwds = {"rotation": 45, "ha":"right"}
labels = dft[each_conti].value_counts()[:width_size].index.tolist()
conti_df = dft[[dep,each_conti]].groupby([dep,each_conti]).size().nlargest(width_size).reset_index(name='Values')
pivot_df = conti_df.pivot(index=each_conti, columns=dep, values='Values')
row_ticks = dft[dep].unique().tolist()
color_list = []
for i in range(len(row_ticks)):
color_list.append(next(colors))
#print('color list = %s' %color_list)
pivot_df.loc[:,row_ticks].plot.bar(stacked=True, color=color_list, ax=ax1)
#dft[each_conti].value_counts()[:width_size].plot(kind='bar',ax=ax1,
# label=class_label)
#ax1.set_xticklabels(labels,**kwds);
ax1.set_title('Distribution of %s (top %d categories only)' %(each_conti,width_size))
else:
for target_var, color2, class_label in zip(target_vars,colors,classes):
try:
if legend_flag <= label_limit:
sns.distplot(dft.loc[dft[dep]==target_var][each_conti],
hist=False, kde=True,
#dft.ix[dft[dep]==target_var][each_conti].hist(
bins=binsize, ax= ax1,
label=target_var, color=color2)
ax1.set_title('Distribution of %s' %each_conti)
legend_flag += 1
else:
sns.distplot(dft.loc[dft[dep]==target_var][each_conti],bins=binsize, ax= ax1,
label=target_var, hist=False, kde=True,
color=color2)
legend_flag += 1
ax1.set_title('Normed Histogram of %s' %each_conti)
except:
pass
ax1.legend(loc='best')
k += 1
fig.tight_layout();
if verbose <= 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name+'_Numerics', dep, mk_dir))
image_count += 1
fig.suptitle('Histograms (KDE plots) of all Continuous Variables', fontsize=12,y=1.01)
###### Now draw the distribution of the target variable in Classification only ####
##### Now draw each of the categorical variable distributions in each subplot ####
############################################################################
if problem_type.endswith('Classification'):
col = 2
row = 1
fig, (ax1,ax2) = plt.subplots(row, col)
fig.set_figheight(5)
fig.set_figwidth(15)
fig.suptitle('%s : Distribution of Target Variable' %dep, fontsize=12)
#fig.subplots_adjust(hspace=0.3) ### This controls the space betwen rows
#fig.subplots_adjust(wspace=0.3) ### This controls the space between columns
###### Precentage Distribution is first #################
dft[dep].value_counts(1).plot(ax=ax1,kind='bar')
if dft[dep].dtype == object:
dft[dep] = dft[dep].factorize()[0]
for p in ax1.patches:
ax1.annotate(str(round(p.get_height(),2)), (round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
ax1.set_xticks(dft[dep].unique().tolist())
ax1.set_xticklabels(classes, rotation = 45, ha="right", fontsize=9)
ax1.set_title('Percentage Distribution of Target = %s' %dep, fontsize=10, y=1.05)
#### Freq Distribution is next ###########################
dft[dep].value_counts().plot(ax=ax2,kind='bar')
for p in ax2.patches:
ax2.annotate(str(round(p.get_height(),2)), (round(p.get_x()*1.01,2), round(p.get_height()*1.01,2)))
ax2.set_xticks(dft[dep].unique().tolist())
ax2.set_xticklabels(classes, rotation = 45, ha="right", fontsize=9)
ax2.set_title('Freq Distribution of Target Variable = %s' %dep, fontsize=12)
elif problem_type == 'Regression':
############################################################################
width_size = 5
height_size = 5
fig = plt.figure(figsize=(width_size,height_size))
dft[dep].plot(kind='hist')
fig.suptitle('%s : Distribution of Target Variable' %dep, fontsize=12)
fig.tight_layout();
else:
return imgdata_list
if verbose <= 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name+'_target', dep, mk_dir))
image_count += 1
####### End of Distplots ###########
return imgdata_list
##### Standardize all the variables in One step. But be careful !
#### All the variables must be numeric for this to work !!
def draw_violinplot(df, dep, nums,verbose,chart_format, modeltype='Regression', mk_dir=None):
plot_name = 'Violin_Plots'
df = df[:]
number_in_each_row = 8
imgdata_list = list()
width_size = 15
height_size = 4
if type(dep) == str:
othernums = [x for x in nums if x not in [dep]]
else:
othernums = [x for x in nums if x not in dep]
if modeltype == 'Regression' or dep == None or dep == '':
image_count = 0
if modeltype == 'Regression':
nums = nums + [dep]
numb = len(nums)
if numb > number_in_each_row:
rows = int(numb/number_in_each_row)+1
else:
rows = 1
plot_index = 0
for row in range(rows):
plot_index += 1
first_10 = number_in_each_row*row
next_10 = first_10 + number_in_each_row
num_10 = nums[first_10:next_10]
df10 = df[num_10]
df_norm = (df10 - df10.mean())/df10.std()
if numb <= 5:
fig = plt.figure(figsize=(min(width_size*len(num_10),width_size),min(height_size,height_size*len(num_10))))
else:
fig = plt.figure(figsize=(min(width_size*len(num_10),width_size),min(height_size,height_size*len(num_10))))
ax = fig.gca()
#ax.set_xticklabels (df.columns, tolist(), size=10)
            sns.violinplot(data=df_norm, orient='v', fliersize=5, scale='width',
                     linewidth=3, notch=False, saturation=0.5, ax=ax, inner='box')
fig.suptitle('Violin Plot of all Continuous Variables', fontsize=15)
fig.tight_layout();
if verbose <= 1:
plt.show();
if verbose == 2:
additional = '_'+str(plot_index)+'_'
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir, additional))
image_count += 1
else :
plot_name = "Box_Plots"
###### This is for Classification problems only ##########################
image_count = 0
classes = df[dep].factorize()[1].tolist()
######################### Add Box plots here ##################################
numb = len(nums)
target_vars = df[dep].unique()
if len(othernums) >= 1:
width_size = 15
height_size = 7
count = 0
data = pd.DataFrame(index=df.index)
cols = 2
noplots = len(nums)
rows = int((noplots/cols)+0.99 )
fig = plt.figure(figsize=(width_size,rows*height_size))
for col in nums:
ax = plt.subplot(rows,cols,count+1)
for targetvar in target_vars:
data[targetvar] = np.nan
mask = df[dep]==targetvar
data.loc[mask,targetvar] = df.loc[mask,col]
ax = sns.boxplot(data=data, orient='v', fliersize=5, ax=ax,
linewidth=3, notch=False, saturation=0.5, showfliers=False)
ax.set_title('%s for each %s' %(col,dep))
count += 1
fig.suptitle('Box Plots without Outliers shown', fontsize=15)
fig.tight_layout();
if verbose <= 1:
plt.show();
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
#########################################
return imgdata_list
########## End of Violin Plots #########
#### Drawing Date Variables is very important in Time Series data
import copy
def draw_date_vars(dfx,dep,datevars, num_vars,verbose, chart_format, modeltype='Regression', mk_dir=None):
dfx = copy.deepcopy(dfx) ## use this to preserve the original dataframe
df = copy.deepcopy(dfx) #### use this for making it into a datetime index etc...
plot_name = 'Time_Series_Plots'
#### Now you want to display 2 variables at a time to see how they change over time
### Don't change the number of cols since you will have to change rows formula as well
gap = 0.3 ### adjusts the gap between rows in multiple rows of charts
imgdata_list = list()
image_count = 0
N = len(num_vars)
chunksize = 20
if N < 1 or len(datevars) == 0:
#### If there are no numeric variables, nothing to plot here ######
return imgdata_list
else:
width_size = 15
height_size = 5
if isinstance(df.index, pd.DatetimeIndex) :
pass
elif len(datevars) > 0:
try:
ts_column = datevars[0]
### if we have already found that it was a date time var, then leave it as it is. Thats good enough!
date_items = df[ts_column].apply(str).apply(len).values
            date_4_digit = all(date_items[0] == item for item in date_items) ### true when all values have the same string length (e.g. a 4-digit year)
#### In some cases, date time variable is a year like 1999 (4-digit), this must be translated correctly
if date_4_digit:
if date_items[0] == 4:
### If it is just a year variable alone, you should leave it as just a year!
df[ts_column] = df[ts_column].map(lambda x: pd.to_datetime(x,format='%Y', errors='coerce')).values
else:
### if it is not a year alone, then convert it into a date time variable
                        if df[ts_column].min() > 1900 or df[ts_column].max() < 2100:
                            df[ts_column] = df[ts_column].map(lambda x: '0101'+str(x) if len(str(x)) == 4 else x)
                            df[ts_column] = pd.to_datetime(df[ts_column], format='%m%d%Y', errors='coerce')
                        else:
                            print('%s could not be indexed. Could not draw date_vars.' %ts_column)
return imgdata_list
else:
df[ts_column] = pd.to_datetime(df[ts_column], infer_datetime_format=True, errors='coerce')
##### Now set the column to be the date - time index
df.index = df.pop(ts_column) #### This is where we set the date time column as the index ######
except:
                print('%s could not be indexed. Could not draw date_vars.' %ts_column)
return imgdata_list
####### Draw the time series for Regression and DepVar
width_size = 15
height_size = 4
cols = 2
if modeltype == 'Regression':
gap=0.5
rows = int(len(num_vars)/cols+0.99)
fig,ax = plt.subplots(figsize=(width_size,rows*height_size))
fig.subplots_adjust(hspace=gap) ### This controls the space betwen rows
df.groupby(ts_column).mean().plot(subplots=True,ax=ax,layout=(rows,cols))
fig.suptitle('Time Series Plot for each Continuous Variable by %s' %ts_column, fontsize=15,y=1.01)
elif modeltype == 'Clustering':
kind = 'line' #### you can change this to plot any kind of time series plot you want
image_count = 0
combos = combinations(num_vars, 2)
combs = copy.deepcopy(combos)
noplots = int((N**2-N)/2)
rows = int((noplots/cols)+0.99)
counter = 1
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.subplots_adjust(hspace=gap) ### This controls the space betwen rows
try:
for (var1,var2) in combos:
plt.subplot(rows,cols,counter)
ax1 = plt.gca()
df[var1].plot(kind=kind, secondary_y=True, label=var1, ax=ax1)
df[var2].plot(kind=kind, title=var2 +' (left_axis) vs. ' + var1+' (right_axis)', ax=ax1)
plt.legend(loc='best')
counter += 1
fig.suptitle('Time Series Plot by %s: Pairwise Continuous Variables' %ts_column, fontsize=15,y=1.01)
except:
plt.close('all')
fig = plot_fast_average_num_by_cat(dfx, datevars, num_vars, verbose,kind="line")
else:
######## This is for Classification problems only ####
kind = 'line' ### you can decide what kind of plots you want to show here ####
image_count = 0
target_vars = df[dep].factorize()[1].tolist()
#classes = copy.deepcopy(classes)
##### Now separate out the drawing of time series data by the number of classes ###
colors = cycle('gkbyrcmgkbyrcmgkbyrcmgkbyr')
classes = df[dep].unique()
if type(classes[0])==int or type(classes[0])==float:
classes = [str(x) for x in classes]
cols = 2
count = 0
combos = combinations(num_vars, 2)
combs = copy.deepcopy(combos)
noplots = len(target_vars)
rows = int((noplots/cols)+0.99)
fig = plt.figure(figsize=(width_size,rows*height_size))
fig.subplots_adjust(hspace=gap) ### This controls the space betwen rows
counter = 1
copy_target_vars = copy.deepcopy(target_vars)
try:
for target_var in copy_target_vars:
df_target = df[df[dep]==target_var]
ax1 = plt.subplot(rows,cols,counter)
df_target.groupby(ts_column).mean().plot(subplots=False,ax=ax1)
ax1.set_title('Time Series plots for '+dep + ' value = '+target_var)
counter += 1
except:
plt.close('all')
fig = plot_fast_average_num_by_cat(df_target, datevars, num_vars, verbose,kind="line")
fig.suptitle('Time Series Plot by %s: Continuous Variables Pair' %ts_column, fontsize=15, y=1.01)
if verbose == 2:
imgdata_list.append(save_image_data(fig, chart_format,
plot_name, dep, mk_dir))
image_count += 1
return imgdata_list
############# End of Date vars plotting #########################
# This little function classifies columns into 4 types: categorical, continuous, boolean and
# certain columns that have only one value repeated that they are useless and must be removed from dataset
#Subtract RIGHT_LIST from LEFT_LIST to produce a new list
### This program is USED VERY HEAVILY so be careful about changing it
def list_difference(l1,l2):
lst = []
for i in l1:
if i not in l2:
lst.append(i)
return lst
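# Hedged sketch (illustrative only): keep the items of l1 that are not in l2,
# preserving their original order.
def _example_list_difference():
    assert list_difference(['a', 'b', 'c'], ['b']) == ['a', 'c']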
######## Find ANY word in columns to identify ANY TYPE OF columns
####### search_for_list = ["Date","DATE", "date"], any words you want to search for it
####### columns__list and word refer to columns in the dataset that is the target dataset
####### Both columns_list and search_for_list must be lists - otherwise it won't work
def search_for_word_in_list(columns_list, search_for_list):
columns_list = columns_list[:]
search_for_list = search_for_list[:]
lst=[]
for src in search_for_list:
for word in columns_list:
result = re.findall (src, word)
if len(result)>0:
if word.endswith(src) and not word in lst:
lst.append(word)
elif (word == 'id' or word == 'ID') and not word in lst:
lst.append(word)
else:
continue
return lst
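# Hedged usage sketch (illustrative only): pick out likely date columns by
# keyword (the match is case-sensitive and suffix-based).
def _example_search_for_word_in_list():
    cols = ['order_date', 'price', 'ship_DATE', 'id']
    return search_for_word_in_list(cols, ['date', 'DATE'])  # ['order_date', 'ship_DATE']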
### This is a small program to look for keywords such as "id" in a dataset to see if they are ID variables
### If that doesn't work, it then compares the len of the dataframe to the variable's unique values. If
### they match, it means that the variable could be an ID variable. If not, it goes with the name of
### of the ID variable through a keyword match with "id" or some such keyword in dataset's columns.
def analyze_ID_columns(dfin,columns_list):
columns_list = columns_list[:]
dfin = dfin[:]
IDcols_final = []
IDcols = search_for_word_in_list(columns_list,
['ID','Identifier','NUMBER','No','Id','Num','num','_no','.no','Number','number','_id','.id'])
if IDcols == []:
for eachcol in columns_list:
if len(dfin) == len(dfin[eachcol].unique()) and dfin[eachcol].dtype != float:
IDcols_final.append(eachcol)
else:
for each_col in IDcols:
if len(dfin) == len(dfin[each_col].unique()) and dfin[each_col].dtype != float:
IDcols_final.append(each_col)
if IDcols_final == [] and IDcols != []:
IDcols_final = IDcols
return IDcols_final
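# Hedged usage sketch (illustrative only): a non-float column with all-unique
# values and an ID-like name is flagged as an ID variable.
def _example_analyze_ID_columns():
    demo = pd.DataFrame({'customer_id': [1, 2, 3], 'amount': [10.0, 10.0, 12.5]})
    return analyze_ID_columns(demo, list(demo.columns))  # ['customer_id']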
# THESE FUNCTIONS ASSUME A "DIRTY DATASET" IN A PANDAS DATAFRAME AS INPUT
# AND CONVERT THEM INTO A DATASET FIT FOR ANALYSIS IN THE END
# this function starts with dividing columns into 4 types: categorical, continuous, boolean and to_delete
# The To_Delete columns have only one unique value and can be removed from the dataset
def start_classifying_vars(dfin, verbose):
dfin = dfin[:]
cols_to_delete = []
boolean_vars = []
categorical_vars = []
continuous_vars = []
discrete_vars = []
totrows = dfin.shape[0]
if totrows == 0:
print('Error: No rows in dataset. Check your input again...')
return cols_to_delete, boolean_vars, categorical_vars, continuous_vars, discrete_vars, dfin
for col in dfin.columns:
if col == 'source':
continue
elif len(dfin[col].value_counts()) <= 1:
cols_to_delete.append(dfin[col].name)
print(' Column %s has only one value hence it will be dropped' %dfin[col].name)
elif dfin[col].dtype==object:
            if (dfin[col].str.len() > 50).any():
cols_to_delete.append(dfin[col].name)
continue
elif search_for_word_in_list([col],['DESCRIPTION','DESC','desc','Text','text']):
cols_to_delete.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 1:
cols_to_delete.append(dfin[col].name)
continue
elif dfin[col].isnull().sum() > 0:
missing_rows=dfin[col].isnull().sum()
pct_missing = float(missing_rows)/float(totrows)
if pct_missing > 0.90:
if verbose <= 1:
print('Pct of Missing Values in %s exceed 90 pct, hence will be dropped...' %col)
cols_to_delete.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
py_version = sys.version_info[0]
if py_version < 3:
# This is the Python 2 Version
try:
item_mode = dfin[col].mode().mode[0]
except:
print('''Scipy.stats package not installed in your Python2. Get it installed''')
else:
# This is the Python 3 Version
try:
item_mode = dfin[col].mode()[0]
except:
print('''Statistics package not installed in your Python3. Get it installed''')
dfin[col].fillna(item_mode,inplace=True)
continue
elif len(dfin.groupby(col)) < 20 and len(dfin.groupby(col)) > 1:
categorical_vars.append(dfin[col].name)
continue
else:
discrete_vars.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) < 20 and len(dfin.groupby(col)) > 1:
categorical_vars.append(dfin[col].name)
continue
else:
discrete_vars.append(dfin[col].name)
elif dfin[col].dtype=='int64' or dfin[col].dtype=='int32':
if len(dfin[col].value_counts()) <= 15:
categorical_vars.append(dfin[col].name)
else:
if dfin[col].isnull().sum() > 0:
missing_rows=dfin[col].isnull().sum()
pct_missing = float(missing_rows)/float(totrows)
if pct_missing > 0.90:
if verbose <= 1:
print('Pct of Missing Values in %s exceed 90 pct, hence will be dropped...' %col)
cols_to_delete.append(dfin[col].name)
continue
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
py_version = sys.version_info[0]
if py_version < 3:
# This is the Python 2 Version
try:
item_mode = dfin[col].mode().mode[0]
except:
print('''Scipy.stats package not installed in your Python2. Get it installed''')
else:
# This is the Python 3 Version
try:
item_mode = dfin[col].mode()[0]
except:
print('''Statistics package not installed in your Python3. Get it installed''')
dfin[col].fillna(item_mode,inplace=True)
continue
else:
if len(dfin[col].value_counts()) <= 25 and len(dfin) >= 250:
categorical_vars.append(dfin[col].name)
else:
continuous_vars.append(dfin[col].name)
elif len(dfin.groupby(col)) == 2:
boolean_vars.append(dfin[col].name)
continue
else:
if len(dfin[col].value_counts()) <= 25 and len(dfin) >= 250:
categorical_vars.append(dfin[col].name)
else:
continuous_vars.append(dfin[col].name)
return cols_to_delete, boolean_vars, categorical_vars, continuous_vars, discrete_vars, dfin
#### this is the MAIN ANALYSIS function that calls the start_classifying_vars and then
#### takes that result and divides categorical vars into 2 additional types: discrete vars and bool vars
def analyze_columns_in_dataset(dfx,IDcolse,verbose):
dfx = dfx[:]
IDcolse = IDcolse[:]
cols_delete, bool_vars, cats, nums, discrete_string_vars, dft = start_classifying_vars(dfx,verbose)
continuous_vars = nums
if nums != []:
for k in nums:
if len(dft[k].unique())==2:
bool_vars.append(k)
elif len(dft[k].unique())<=20:
cats.append(k)
elif (np.array(dft[k]).dtype=='float64' or np.array(dft[k]).dtype=='int64') and (k not in continuous_vars):
if len(dft[k].value_counts()) <= 25:
cats.append(k)
else:
continuous_vars.append(k)
elif dft[k].dtype==object:
discrete_string_vars.append(k)
elif k in continuous_vars:
continue
else:
print('The %s variable could not be classified into any known type' % k)
#print(cols_delete, bool_vars, cats, continuous_vars, discrete_string_vars)
date_vars = search_for_word_in_list(dfx.columns.tolist(),['Date','DATE','date','TIME','time',
'Time','Year','Yr','year','yr','timestamp',
'TimeStamp','TIMESTAMP','Timestamp','Time Stamp'])
date_vars = [x for x in date_vars if x not in find_remove_duplicates(cats+bool_vars) ]
if date_vars == []:
for col in continuous_vars:
if dfx[col].dtype==int:
if dfx[col].min() > 1900 or dfx[col].max() < 2100:
date_vars.append(col)
for col in discrete_string_vars:
try:
dfx.index = pd.to_datetime(dfx.pop(col), infer_datetime_format=True)
except:
continue
if isinstance(dfx.index, pd.DatetimeIndex):
date_vars = [dfx.index.name]
continuous_vars=list_difference(list_difference(continuous_vars,date_vars),IDcolse)
#cats = list_difference(continuous_vars, cats)
cats=list_difference(cats,date_vars)
discrete_string_vars=list_difference(list_difference(discrete_string_vars,date_vars),IDcolse)
return cols_delete, bool_vars, cats, continuous_vars, discrete_string_vars,date_vars, dft
# Removes duplicates from a list to return unique values - USED ONLYONCE
def find_remove_duplicates(values):
output = []
seen = set()
for value in values:
if value not in seen:
output.append(value)
seen.add(value)
return output
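# Hedged sketch (illustrative only): order-preserving de-duplication.
def _example_find_remove_duplicates():
    assert find_remove_duplicates(['b', 'a', 'b', 'c', 'a']) == ['b', 'a', 'c']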
#################################################################################
def load_file_dataframe(dataname, sep=",", header=0, verbose=0, nrows=None,parse_dates=False):
start_time = time.time()
########################### This is where we load file or data frame ###############
if isinstance(dataname,str):
#### this means they have given file name as a string to load the file #####
codex_flag = False
codex = ['ascii', 'utf-8', 'iso-8859-1', 'cp1252', 'latin1']
if dataname != '' and dataname.endswith(('csv')):
try:
dfte = pd.read_csv(dataname, sep=sep, header=header, encoding=None,
parse_dates=parse_dates)
if not nrows is None:
if nrows < dfte.shape[0]:
print(' max_rows_analyzed is smaller than dataset shape %d...' %dfte.shape[0])
dfte = dfte.sample(nrows, replace=False, random_state=99)
print(' randomly sampled %d rows from read CSV file' %nrows)
print('Shape of your Data Set loaded: %s' %(dfte.shape,))
if len(np.array(list(dfte))[dfte.columns.duplicated()]) > 0:
print('You have duplicate column names in your data set. Removing duplicate columns now...')
dfte = dfte[list(dfte.columns[~dfte.columns.duplicated(keep='first')])]
return dfte
except:
codex_flag = True
            if codex_flag:
                for code in codex:
                    try:
                        dfte = pd.read_csv(dataname, sep=sep, header=header, encoding=code, nrows=nrows,
                                        parse_dates=parse_dates)
                        print('Shape of your Data Set loaded: %s' %(dfte.shape,))
                        return dfte
                    except:
                        print('    pandas %s encoder does not work for this file. Continuing...' %code)
                        continue
elif dataname.endswith(('xlsx','xls','txt')):
#### It's very important to get header rows in Excel since people put headers anywhere in Excel#
if nrows is None:
dfte = pd.read_excel(dataname,header=header, parse_dates=parse_dates)
else:
dfte = pd.read_excel(dataname,header=header, nrows=nrows, parse_dates=parse_dates)
print('Shape of your Data Set loaded: %s' %(dfte.shape,))
return dfte
else:
print(' Filename is an empty string or file not able to be loaded')
return None
elif isinstance(dataname,pd.DataFrame):
#### this means they have given a dataframe name to use directly in processing #####
if nrows is None:
dfte = copy.deepcopy(dataname)
else:
if nrows < dataname.shape[0]:
print(' Since nrows is smaller than dataset, loading random sample of %d rows into pandas...' %nrows)
dfte = dataname.sample(n=nrows, replace=False, random_state=99)
else:
dfte = copy.deepcopy(dataname)
print('Shape of your Data Set loaded: %s' %(dfte.shape,))
if len(np.array(list(dfte))[dfte.columns.duplicated()]) > 0:
print('You have duplicate column names in your data set. Removing duplicate columns now...')
dfte = dfte[list(dfte.columns[~dfte.columns.duplicated(keep='first')])]
return dfte
else:
print('Dataname input must be a filename with path to that file or a Dataframe')
return None
##########################################################################################
import copy
def classify_print_vars(filename,sep, max_rows_analyzed, max_cols_analyzed,
depVar='',dfte=None, header=0,verbose=0):
corr_limit = 0.7 ### This limit represents correlation above this, vars will be removed
start_time=time.time()
if filename:
dataname = copy.deepcopy(filename)
parse_dates = True
else:
dataname = copy.deepcopy(dfte)
parse_dates = False
dfte = load_file_dataframe(dataname, sep=sep, header=header, verbose=verbose,
nrows=max_rows_analyzed, parse_dates=parse_dates)
orig_preds = [x for x in list(dfte) if x not in [depVar]]
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(dfte[orig_preds], verbose)
##### Classify Columns ################
IDcols = var_df['id_vars']
discrete_string_vars = var_df['nlp_vars']+var_df['discrete_string_vars']
cols_delete = var_df['cols_delete']
bool_vars = var_df['string_bool_vars'] + var_df['num_bool_vars']
int_vars = var_df['int_vars']
categorical_vars = var_df['cat_vars'] + var_df['factor_vars'] + int_vars + bool_vars
date_vars = var_df['date_vars']
if len(var_df['continuous_vars'])==0 and len(int_vars)>0:
continuous_vars = var_df['int_vars']
categorical_vars = list_difference(categorical_vars, int_vars)
int_vars = []
else:
continuous_vars = var_df['continuous_vars']
#### from now you can use wordclouds on discrete_string_vars ######################
preds = [x for x in orig_preds if x not in IDcols+cols_delete]
if len(IDcols+cols_delete) == 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(IDcols+cols_delete))
if verbose >= 1:
print(' List of variables removed: %s' %(IDcols+cols_delete))
############# Sample data if too big and find problem type #############################
if dfte.shape[0]>= max_rows_analyzed:
print('Since Number of Rows in data %d exceeds maximum, randomly sampling %d rows for EDA...' %(len(dfte),max_rows_analyzed))
dft = dfte.sample(max_rows_analyzed, random_state=0)
else:
dft = copy.deepcopy(dfte)
###### This is where you find what type the dependent variable is ########
if type(depVar) == str:
if depVar == '':
cols_list = list(dft)
problem_type = 'Clustering'
classes = []
else:
try:
problem_type = analyze_problem_type(dft, depVar,verbose)
except:
print('Could not find given target var in data set. Please check input')
### return the data frame as is ############
return dfte
cols_list = list_difference(list(dft),depVar)
if dft[depVar].dtype == object:
classes = dft[depVar].factorize()[1].tolist()
#### You dont have to convert it since most charts can take string vars as target ####
#dft[depVar] = dft[depVar].factorize()[0]
elif dft[depVar].dtype == np.int64:
classes = dft[depVar].factorize()[1].tolist()
elif dft[depVar].dtype == bool:
classes = dft[depVar].unique().astype(int).tolist()
elif dft[depVar].dtype == float and problem_type.endswith('Classification'):
classes = dft[depVar].factorize()[1].tolist()
else:
classes = []
elif depVar == None:
cols_list = list(dft)
problem_type = 'Clustering'
classes = []
else:
depVar1 = depVar[0]
problem_type = analyze_problem_type(dft, depVar1)
cols_list = list_difference(list(dft), depVar1)
if dft[depVar1].dtype == object:
classes = dft[depVar1].factorize()[1].tolist()
#### You dont have to convert it since most charts can take string vars as target ####
#dft[depVar] = dft[depVar].factorize()[0]
elif dft[depVar1].dtype == np.int64:
classes = dft[depVar1].factorize()[1].tolist()
elif dft[depVar1].dtype == bool:
            classes = dft[depVar1].unique().astype(int).tolist()
elif dft[depVar1].dtype == float and problem_type.endswith('Classification'):
classes = dft[depVar1].factorize()[1].tolist()
else:
classes = []
print('Since AutoViz cannot visualize multiple targets, selecting the first one from list: %s' %depVar1)
depVar = copy.deepcopy(depVar1)
############# Check if there are too many columns to visualize ################
if len(preds) >= max_cols_analyzed:
######### In that case, SELECT IMPORTANT FEATURES HERE ######################
if problem_type.endswith('Classification') or problem_type == 'Regression':
print('Number of variables = %d exceeds limit, finding top %d variables through XGBoost' %(len(
preds), max_cols_analyzed))
important_features,num_vars, _ = find_top_features_xgb(dft,preds,continuous_vars,
depVar,problem_type,corr_limit,verbose)
if len(important_features) >= max_cols_analyzed:
print(' Since number of features selected is greater than max columns analyzed, limiting to %d variables' %max_cols_analyzed)
important_features = important_features[:max_cols_analyzed]
dft = dft[important_features+[depVar]]
#### Time to classify the important columns again ###
var_df = classify_columns(dft[important_features], verbose)
IDcols = var_df['id_vars']
discrete_string_vars = var_df['nlp_vars']+var_df['discrete_string_vars']
cols_delete = var_df['cols_delete']
bool_vars = var_df['string_bool_vars'] + var_df['num_bool_vars']
int_vars = var_df['int_vars']
categorical_vars = var_df['cat_vars'] + var_df['factor_vars'] + int_vars + bool_vars
if len(var_df['continuous_vars'])==0 and len(int_vars)>0:
continuous_vars = var_df['int_vars']
categorical_vars = list_difference(categorical_vars, int_vars)
int_vars = []
else:
continuous_vars = var_df['continuous_vars']
date_vars = var_df['date_vars']
preds = [x for x in important_features if x not in IDcols+cols_delete+discrete_string_vars]
if len(IDcols+cols_delete+discrete_string_vars) == 0:
print(' No variables removed since no ID or low-information variables found in data')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(IDcols+cols_delete+discrete_string_vars))
if verbose >= 1:
print(' List of variables removed: %s' %(IDcols+cols_delete+discrete_string_vars))
dft = dft[preds+[depVar]]
else:
continuous_vars = continuous_vars[:max_cols_analyzed]
print('%d numeric variables in data exceeds limit, taking top %d variables' %(len(
continuous_vars), max_cols_analyzed))
if verbose >= 1:
print(' List of variables selected: %s' %(continuous_vars[:max_cols_analyzed]))
elif len(continuous_vars) < 1:
print('No continuous variables in this data set. No visualization can be performed')
### Return data frame as is #####
return dfte
else:
######### If above 1 but below limit, leave features as it is ######################
if not isinstance(depVar, list):
if depVar != '':
dft = dft[preds+[depVar]]
else:
dft = dft[preds+depVar]
################### Time to reduce cat vars which have more than 30 categories #############
#discrete_string_vars += np.array(categorical_vars)[dft[categorical_vars].nunique()>30].tolist()
#categorical_vars = left_subtract(categorical_vars,np.array(
# categorical_vars)[dft[categorical_vars].nunique()>30].tolist())
############# Next you can print them if verbose is set to print #########
ppt = pprint.PrettyPrinter(indent=4)
if verbose==1 and len(cols_list) <= max_cols_analyzed:
marthas_columns(dft,verbose)
print(" Columns to delete:")
ppt.pprint(' %s' %cols_delete)
print(" Boolean variables %s ")
ppt.pprint(' %s' %bool_vars)
print(" Categorical variables %s ")
ppt.pprint(' %s' %categorical_vars)
print(" Continuous variables %s " )
ppt.pprint(' %s' %continuous_vars)
print(" Discrete string variables %s " )
ppt.pprint(' %s' %discrete_string_vars)
print(" Date and time variables %s " )
ppt.pprint(' %s' %date_vars)
print(" ID variables %s ")
ppt.pprint(' %s' %IDcols)
print(" Target variable %s ")
ppt.pprint(' %s' %depVar)
    elif verbose==1 and len(cols_list) > max_cols_analyzed:
        print('    Total columns > %d, too numerous to print.' %max_cols_analyzed)
return dft,depVar,IDcols,bool_vars,categorical_vars,continuous_vars,discrete_string_vars,date_vars,classes,problem_type, cols_list
####################################################################
def marthas_columns(data,verbose=0):
"""
    This function is named in honor of one of my students, who came up with the idea for it.
    It's a neat way of printing data types and per-column information, compared to the boring describe() function in Pandas.
"""
data = data[:]
print('Data Set Shape: %d rows, %d cols' % data.shape)
if data.shape[1] > 30:
print('Too many columns to print')
else:
if verbose==1:
print('Data Set columns info:')
for col in data.columns:
print('* %s: %d nulls, %d unique vals, most common: %s' % (
col,
data[col].isnull().sum(),
data[col].nunique(),
data[col].value_counts().head(2).to_dict()
))
print('--------------------------------------------------------------------')
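#### Hedged usage sketch (illustrative, not part of the original module): how marthas_columns
#### might be called on a small, made-up dataframe ('df_demo' is a hypothetical name).
# import pandas as pd
# df_demo = pd.DataFrame({'age': [25, 32, 41], 'city': ['NY', 'SF', None]})
# marthas_columns(df_demo, verbose=1)
# # prints the shape, then nulls / unique counts / most common values for each column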
################################################
################################################################################
######### NEW And FAST WAY to CLASSIFY COLUMNS IN A DATA SET #######
################################################################################
def classify_columns(df_preds, verbose=0):
"""
    Classifies the columns of a predictors-only dataframe as part of exploratory data analysis (EDA).
    ######################################################################################
    Takes a dataframe containing only predictors to be classified into various types.
    DO NOT SEND IN A TARGET COLUMN, since it will be classified as just another predictor.
    Each column is assigned to one class such as numeric, categorical, date, id, boolean,
    nlp or discrete_string, or is marked for deletion.
    ####### Returns a dictionary keyed by variable kind: continuous_vars, int_vars, cat_vars,
    # factor_vars, string_bool_vars, num_bool_vars, discrete_string_vars, nlp_vars, date_vars,
    # id_vars and cols_delete.
"""
train = copy.deepcopy(df_preds)
    #### If there are 30 chars or more in a discrete_string_var, it is then considered an NLP variable
max_nlp_char_size = 30
max_cols_to_print = 30
print('############## C L A S S I F Y I N G V A R I A B L E S ####################')
print('Classifying variables in data set...')
    #### Cat_Limit defines the max number of categories a column can have to be called a categorical column
cat_limit = 35
float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###
def add(a,b):
return a+b
sum_all_cols = dict()
orig_cols_total = train.shape[1]
    #### Columns with a single unique value, or with 90%+ missing values, are slated for deletion ####
cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1
) | (train[col].isnull().sum()/len(train) >= 0.90)]
train = train[left_subtract(list(train),cols_delete)]
var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(
columns={0:'type_of_column'})
sum_all_cols['cols_delete'] = cols_delete
var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']
and len(train[x['index']].value_counts()) == 2 else 0, axis=1)
string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])
sum_all_cols['string_bool_vars'] = string_bool_vars
var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16','int32','int64',
'float16','float32','float64'] and len(
train[x['index']].value_counts()) == 2 else 0, axis=1)
num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])
sum_all_cols['num_bool_vars'] = num_bool_vars
###### This is where we take all Object vars and split them into diff kinds ###
discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[
'index'] not in string_bool_vars+cols_delete else 0,axis=1)
######### This is where we figure out whether a string var is nlp or discrete_string var ###
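    #### Decision rules applied in the loop below (descriptive summary of the existing checks):
    ####   very long strings (mean length >= 50) that are nearly all unique  -> nlp_strings
    ####   moderately long strings (mean length >= max_nlp_char_size)        -> discrete_strings
    ####   more than cat_limit categories but not fully unique               -> discrete_strings
    ####   more than cat_limit categories and one value per row              -> id_col
    ####   everything else                                                   -> cat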
var_df['nlp_strings'] = 0
var_df['discrete_strings'] = 0
var_df['cat'] = 0
var_df['id_col'] = 0
discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()
copy_discrete_or_nlp_vars = copy.deepcopy(discrete_or_nlp_vars)
if len(discrete_or_nlp_vars) > 0:
for col in copy_discrete_or_nlp_vars:
            #### First fill empty or missing vals, since the string-length checks below would blow up on NaNs ###
train[col] = train[col].fillna('emptyspace')
if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= 50 and len(train[col].value_counts()
) >= int(0.9*len(train)) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'nlp_strings'] = 1
elif train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= max_nlp_char_size and train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) < 50 and len(train[col].value_counts()
) <= int(0.9*len(train)) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) <= int(0.9*len(train)) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) == len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
var_df.loc[var_df['index']==col,'cat'] = 1
nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])
sum_all_cols['nlp_vars'] = nlp_vars
discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])
sum_all_cols['discrete_string_vars'] = discrete_string_vars
###### This happens only if a string column happens to be an ID column #######
    #### Do NOT add this to id_vars yet; that is done later. Don't change this lightly...
    #### Columns with a Category dtype are special: they can be left as-is and used directly in Python. ###
var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,
axis=1)
factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])
sum_all_cols['factor_vars'] = factor_vars
########################################################################
date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16',
'int32','int64'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
######### This is where we figure out whether a numeric col is date or id variable ###
var_df['int'] = 0
var_df['date_time'] = 0
### if a particular column is date-time type, now set it as a date time variable ##
var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
### this is where we save them as date time variables ###
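    #### Heuristic used below (descriptive summary): integer columns whose values are all unique
    #### are either IDs or timestamps; values outside the 1900-2050 range, or values that fail
    #### pd.to_datetime, mark the column as id_col, otherwise as date_time. Non-unique integer
    #### columns go through the same year-range / parse test but fall back to plain int vars.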
if len(var_df.loc[date_or_id==1]) != 0:
for col in var_df.loc[date_or_id==1]['index'].values.tolist():
if len(train[col].value_counts()) == len(train):
if train[col].min() < 1900 or train[col].max() > 2050:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
if train[col].min() < 1900 or train[col].max() > 2050:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
pass
int_vars = list(var_df[(var_df['int'] ==1)]['index'])
date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])
id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])
sum_all_cols['int_vars'] = int_vars
copy_date_vars = copy.deepcopy(date_vars)
for date_var in copy_date_vars:
        #### This test makes sure that the candidate date vars are actually parseable as dates
try:
| pd.to_datetime(train[date_var],infer_datetime_format=True) | pandas.to_datetime |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = | date_range("20090415", "20090519", freq="B") | pandas.date_range |
import pickle
import pandas as pd
import numpy as np
crnn2_result = pickle.load(open('../../CRNN2/crnn_results/crnn_results_summary.p', 'rb'))
crnn4_result = pickle.load(open('../../CRNN4/crnn_results/crnn_results_summary.p', 'rb'))
crnn6_result = pickle.load(open('../../CRNN6/crnn_results/crnn_results_summary.p', 'rb'))
crnn8_result = pickle.load(open('../../CRNN8/crnn_results/crnn_results_summary.p', 'rb'))
crnn10_result = pickle.load(open('../../CRNN10/crnn_results/crnn_results_summary.p', 'rb'))
crnn40_result = pickle.load(open('../../CRNN40/crnn_results/crnn_results_summary.p', 'rb'))
crnn100_result = pickle.load(open('../../CRNN100/crnn_results/crnn_results_summary.p', 'rb'))
crnn400_result = pickle.load(open('../../CRNN400/crnn_results/crnn_results_summary.p', 'rb'))
crnn1200_result = pickle.load(open('../../CRNN1200/crnn_results/crnn_results_summary.p', 'rb'))
vgg_result = pickle.load(open('../../VGG/results/vgg_results_summary.p', 'rb'))
lenet_result = pickle.load(open('../../LENET/results/lenet_results_summary.p', 'rb'))
svm_result = pickle.load(open('../../SVM/results/svm_results_summary.p', 'rb'))
result_summary = {'crnn2': pd.DataFrame(crnn2_result), 'crnn4': pd.DataFrame(crnn4_result), 'crnn6': pd.DataFrame(crnn6_result),
'crnn8': pd.DataFrame(crnn8_result), 'crnn10': pd.DataFrame(crnn10_result), 'crnn40': pd.DataFrame(crnn40_result),
'crnn100': pd.DataFrame(crnn100_result), 'crnn400': pd.DataFrame(crnn400_result), 'crnn1200': pd.DataFrame(crnn1200_result),
'vgg': pd.DataFrame(vgg_result), 'lenet': pd.DataFrame(lenet_result), 'svm': pd.DataFrame(svm_result)}
result_summary = pd.concat(result_summary)
result_summary.to_csv('../result/result_summary.csv', sep = ',')
crnn_pitch_shift = pickle.load(open('../../CRNN400/crnn_results/pitch_shift_results.p', 'rb'))
crnn_time_stretch = pickle.load(open('../../CRNN400/crnn_results/time_stretch_results.p', 'rb'))
crnn_crop = pickle.load(open('../../CRNN400/crnn_results/crop_results.p', 'rb'))
lenet_pitch_shift = pickle.load(open('../../LENET/results/pitch_shift_results.p', 'rb'))
lenet_time_stretch = pickle.load(open('../../LENET/results/time_stretch_results.p', 'rb'))
lenet_crop = pickle.load(open('../../LENET/results/crop_results.p', 'rb'))
svm_pitch_shift = pickle.load(open('../../SVM/results/pitch_shift_results.p', 'rb'))
svm_time_stretch = pickle.load(open('../../SVM/results/time_stretch_results.p', 'rb'))
svm_crop = pickle.load(open('../../SVM/results/crop_results.p', 'rb'))
simulation_summary = {'crnn_pitch_shift': pd.DataFrame(crnn_pitch_shift), 'crnn_time_stretch': pd.DataFrame(crnn_time_stretch),
                      'crnn_crop': pd.DataFrame(crnn_crop), 'lenet_pitch_shift': | pd.DataFrame(lenet_pitch_shift) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import logging
import itertools
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
# TODO: modify the commented-out SMOTE helper below to work with k-fold CV (see the sketch after it)
#def smoteAdataset(Xig, yig, test_size=0.2, random_state=0):
#def smoteAdataset(Xig_train, yig_train, Xig_test, yig_test):
# sm=SMOTE(random_state=2)
# Xig_train_res, yig_train_res = sm.fit_sample(Xig_train, yig_train.ravel())
# return Xig_train_res, pd.Series(yig_train_res), Xig_test, pd.Series(yig_test)
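# Hedged sketch (not from the original script): one way the commented-out SMOTE helper above
# might be adapted to k-fold CV, oversampling only the training fold so the validation fold
# stays untouched. Names are illustrative and X/y are assumed to be a pandas DataFrame/Series.
def smote_per_fold(X, y, n_splits=5, random_state=2):
    """Yield (X_train_res, y_train_res, X_val, y_val) with SMOTE applied to each training fold."""
    folds = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state)
    for trn_idx, val_idx in folds.split(X, y):
        X_trn, y_trn = X.iloc[trn_idx], y.iloc[trn_idx]
        X_val, y_val = X.iloc[val_idx], y.iloc[val_idx]
        sm = SMOTE(random_state=random_state)
        X_trn_res, y_trn_res = sm.fit_sample(X_trn, y_trn.ravel())
        yield X_trn_res, pd.Series(y_trn_res), X_val, y_val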
def create_logger():
logger_ = logging.getLogger('main')
logger_.setLevel(logging.DEBUG)
fh = logging.FileHandler('simple_lightgbm.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s]%(asctime)s:%(name)s:%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger_.addHandler(fh)
logger_.addHandler(ch)
def get_logger():
return logging.getLogger('main')
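# Typical usage (illustrative): call create_logger() once at startup, then fetch the shared
# instance anywhere in the pipeline with get_logger().
# create_logger()
# get_logger().info('training started')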
def lgb_multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes), order='F')
y_ohe = | pd.get_dummies(y_true) | pandas.get_dummies |
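# Hedged sketch (illustrative, not the original author's code): a common way to finish this
# weighted multi-class log loss is to clip the predicted probabilities, average the
# log-probability of the true class within each class, and take a class-weighted mean, roughly:
#     y_p = np.clip(y_p, 1e-15, 1 - 1e-15)
#     y_p_log = np.log(y_p)
#     y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
#     nb_pos = y_ohe.sum(axis=0).values.astype(float)
#     class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
#     y_w = y_log_ones * class_arr / nb_pos
#     loss = -np.sum(y_w) / np.sum(class_arr)
#     return 'wloss', loss, False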