| prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars) |
|---|---|---|
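The rows below are rendered back to back: each prompt (a truncated Python source file) ends at a | completion | api | marker giving the pandas call that completes it and the fully qualified API name. A minimal sketch of loading and inspecting a dataset with this schema using the Hugging Face datasets library; the repository id is a hypothetical placeholder and whether the data is published there is an assumption.
from datasets import load_dataset

ds = load_dataset("user/pandas-api-completions", split="train")  # hypothetical repo id
for row in ds.select(range(3)):
    # each row is a dict with 'prompt', 'completion' and 'api' keys
    print(len(row["prompt"]), "->", row["api"])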
import os
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.datasets import load_svmlight_file
def sparsity(X):
number_of_nan = np.count_nonzero(np.isnan(X))
number_of_zeros = np.count_nonzero(np.abs(X) < 1e-6)
return (number_of_nan + number_of_zeros) / float(X.shape[0] * X.shape[1]) * 100.
def print_dataset_statistics(X, y, queries, name):
print('----------------------------------')
print("Characteristics of dataset " + name)
print("rows x columns " + str(X.shape))
print("sparsity: " + str(sparsity(X)))
print("y distribution")
print(Counter(y))
print("num samples in queries: minimum, median, maximum")
num_queries = list(Counter(queries).values())
print(np.min(num_queries), np.median(num_queries), np.max(num_queries))
print('----------------------------------')
def process_libsvm_file(file_name):
X, y, queries = load_svmlight_file(file_name, query_id=True)
return X.todense(), y, queries
def dump_to_file(out_file_name, X, y, queries):
all = np.hstack((y.reshape(-1, 1), queries.reshape(-1, 1), X))
| pd.DataFrame(all) | pandas.DataFrame |
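A usage sketch of the helpers above, assuming the function definitions are in scope; "train.txt" is a placeholder path to a LibSVM-format ranking file with query ids.
X, y, queries = process_libsvm_file("train.txt")
print_dataset_statistics(X, y, queries, "train")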
import pandas as pd
import graphlab as gl
orderData = | pd.read_csv("Data/orders.csv") | pandas.read_csv |
import os
import pandas as pd
statdir = '/u/58/wittkes3/unix/Documents/bdeo/stats/18'
csvname = '/u/58/wittkes3/unix/Documents/bdeo/s1_VVVH_18.csv'
attributes = '/media/wittkes3/satdat6/bigdataeo_LUKE/original/feb20/reference-zone1-2017.csv'
datelist=[]
fulldf=None
for x in os.listdir(statdir):
xpa = os.path.join(statdir,x)
df = pd.read_csv(xpa)
#print(df.head())
meandf = df[['parcelID','mean']].copy()  # copy so the rename below does not warn about modifying a slice
#mediandf = df[['parcelID','median']]
year = x.split('_')[2][:4]
pol = x.split('_')[4]
meandf.rename(columns={'parcelID':'parcelID','mean':'mean_'+ pol},inplace=True)
print(meandf.head())
meandf['parcelID'] = meandf['parcelID'].apply(lambda x: "{}{}{}".format(year,'_', x))
#print(newdf)
#datelist.append(date)
if fulldf is None:
fulldf = meandf
else:
fulldf= pd.merge(fulldf,meandf, on='parcelID')
#sorteddf = fulldf.reindex(sorted(fulldf.columns), axis=1)
#sorteddf.rename(columns={'0ID':'PlotID'}, inplace=True)
print(fulldf)
dfa = pd.read_csv(attributes)
dfa['parcelID'] = dfa['parcelID'].apply(lambda x: "{}{}{}".format(year,'_', x))
print(dfa)
dfauv = | pd.merge(fulldf,dfa, on='parcelID' ) | pandas.merge |
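A minimal sketch of the accumulation pattern used above: repeated merges on 'parcelID' turn per-file (parcelID, mean) tables into one wide table with a mean_<pol> column per polarisation; the values here are made up.
import pandas as pd

left = pd.DataFrame({"parcelID": ["2018_1", "2018_2"], "mean_VV": [0.11, 0.12]})
right = pd.DataFrame({"parcelID": ["2018_1", "2018_2"], "mean_VH": [0.21, 0.22]})
wide = pd.merge(left, right, on="parcelID")
# columns: parcelID, mean_VV, mean_VH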
import requests
import pandas as pd
import os
import sys
import io
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
from zipfile import ZipFile
import glob
import shutil
import logging
import datetime
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'soc_004_rw1_human_development_index' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
Data can be downloaded at the following link:
http://hdr.undp.org/en/indicators/137506#
Above the data table, you will see a 'Download Data' button
Once you click this button, the data will be downloaded as a csv file to your Downloads folder.
'''
logger.info('Downloading raw data')
download = glob.glob(os.path.join(os.path.expanduser("~"), 'Downloads', 'Human Development Index (HDI).csv'))[0]
# Move this file into your data directory
raw_data_file = os.path.join(data_dir, os.path.basename(download))
shutil.move(download,raw_data_file)
'''
Process data
'''
# read the data as a pandas dataframe
df = pd.read_csv(raw_data_file)
# remove empty columns from the dataframe
df.dropna(axis = 1, how = 'all', inplace = True)
# remove empty rows and rows that only contain metadata
df.dropna(axis = 0, how = 'any', inplace = True)
# replace the '..' placeholders in the dataframe with the string 'None' (coerced to NaN by pd.to_numeric below)
df.replace('..', 'None', inplace = True)
# convert the data type of the column 'HDI Rank' to integer
df['HDI Rank'] = pd.to_numeric(df['HDI Rank'], errors='coerce')
df['HDI Rank'] = df['HDI Rank'].astype('Int32')
# convert the dataframe from wide to long format
# so there will be one column indicating the year and another column indicating the index
df = df.melt(id_vars = ['HDI Rank', 'Country'])
# rename the 'variable' and 'value' columns created in the previous step to 'year' and 'yr_data'
df.rename(columns = { 'variable': 'year', 'value':'yr_data'}, inplace = True)
# convert the data type of the 'year' column to integer
df['year'] = df['year'].astype('int')
# convert the years in the 'year' column to datetime objects and store them in a new column 'datetime'
df['datetime'] = [datetime.datetime(x, 1, 1) for x in df.year]
# convert the data type of column 'yr_data' to float
df['yr_data'] = | pd.to_numeric(df['yr_data'], errors='coerce') | pandas.to_numeric |
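A small sketch of the wide-to-long step above: melt keeps the id columns and stacks the year columns into 'variable'/'value' pairs (toy values, not real HDI figures).
import pandas as pd

wide = pd.DataFrame({"HDI Rank": [1], "Country": ["Norway"], "1990": [0.85], "1991": [0.86]})
long = wide.melt(id_vars=["HDI Rank", "Country"])
#    HDI Rank Country variable  value
# 0         1  Norway     1990   0.85
# 1         1  Norway     1991   0.86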
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import easygui
import pandas as pd
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtGui as qtg
from PyQt5 import QtCore as qtc
from GUI.MainWindow import Ui_MainWindow
from GUI.RefreshDataBasePopButton import Ui_RefreshDataBasePopButton
from GUI.StatsPopButton import Ui_StatsPopButton
from GUI.DarwinizerPopButton import Ui_Darwinizer
from scripts.refreshdatabase import *
from scripts.graph_and_stats import *
from scripts.darwinizer import *
class MainWindow(qtw.QMainWindow, Ui_MainWindow):
def __init__(self):
"""MainWindow constructor"""
super().__init__()
self.setupUi(self)
self.setCentralWidget(self.centralwidget)
self.setWindowTitle("Darwin Connect")
# Main UI code goes here
"""Barra de menu"""
self.actionOpen.triggered.connect(lambda: self.filesearcher()) #Boton Abrir de la barra menu
self.actionDestino.triggered.connect(lambda: self.diropenbox())
self.actionSalir.triggered.connect(self.close) #Boton Salir de la barra menu
"""Ventana principal"""
self.refresh_button.clicked.connect(lambda: self.refresh_button_UI())
self.stats_button.clicked.connect(self.stats_button_UI)
self.darwinizer_button.clicked.connect(self.darwinizer_button_UI)
#End main UI code
self.show()
def filesearcher(self): # get the path of the Excel file to analyze
route= easygui.fileopenbox()
self.xlsx_route_response_label.setText(route)
# TODO: figure out how to return this value to the caller; otherwise save it to a temporary file, or copy the info from the label
def diropenbox(self):
easygui.msgbox(msg="Seleccione un directorio en el cual se guardaran los archivos creados.\nSi ya ha hecho este proceso con este archivo seleccione el mismo directorio",ok_button="Ok")
route_destiny_response_label=easygui.diropenbox()
self.route_destiny_response_label.setText(route_destiny_response_label)
def refresh_button_UI(self):
self.build=RefreshDataBaseButton(self.xlsx_route_response_label.text(),self.route_destiny_response_label.text())
self.build.show()
def stats_button_UI(self):
self.build=StatsButton(self.route_destiny_response_label.text())
self.build.show()
def darwinizer_button_UI(self):
self.build=DarwinizerButton(self.xlsx_route_response_label.text(),self.route_destiny_response_label.text())
self.build.show()
class RefreshDataBaseButton(qtw.QWidget, Ui_RefreshDataBasePopButton):
def __init__(self,OriginPathWay,DestinyPathWay):
"""MainWindow constructor"""
super().__init__()
self.setupUi(self)
self.setWindowTitle("Darwin Connect")
# Var Definitions
self.OriginPathWay=OriginPathWay
self.DestinyPathway=DestinyPathWay
# Main UI code goes here
self.xlsx_route_response_label.setText(OriginPathWay)
self.route_destiny_response_label.setText(DestinyPathWay)
self.exitbutton.clicked.connect(self.close)
self.execute_button.clicked.connect(lambda: self.refresh_button_func())
#End main UI code
self.show()
def refresh_button_func(self):
full_df,organized_df,indexo=refreshdatabase().file_organizer(self.xlsx_route_response_label.text(),self.route_destiny_response_label.text()) #Abrir archivo excel, organized_df just dwc values
IDs=organized_df.index.tolist()
print('compare/create files...')
if os.path.isdir(f"{self.route_destiny_response_label.text()}/files")==True:
for id in IDs:
refreshdatabase().comparefiles(id,organized_df.loc[id],"dwc_files",self.route_destiny_response_label.text())
else:
for id in IDs:
refreshdatabase().infowriting(id,organized_df.loc[id],"dwc_files",self.route_destiny_response_label.text())
"""SECCION PARA SHOWROOM"""
if self.question_1_pos_ans.isChecked()==True:
showroom_option_answer=True
elif self.question_1_neg_ans.isChecked():
showroom_option_answer=False
if showroom_option_answer==True:
showroom_df=refreshdatabase().visitors_file_maker(full_df,self.route_destiny_response_label.text(),indexo)
# the showroom organization function goes here
if os.path.isdir(f"{self.route_destiny_response_label.text()}/showroom_files")==True:
for id in IDs:
refreshdatabase().comparefiles(id,showroom_df.loc[id],"invited",self.route_destiny_response_label.text())
else:
for id in IDs:
refreshdatabase().infowriting(id,showroom_df.loc[id],"invited",self.route_destiny_response_label.text())
print ('No hay nada más que hacer por el momento...')
#************************************************************************#
print("Creando codigos Qr")
api_key=self.Firebase_key_ans.text()
sub_domain=self.Firebase_domain_ans.text()
GitHub_user=self.GitHub_user_ans.text()
GitHub_repo=self.GitHub_repository_ans.text()
qr_tools_class=qr_tools(api_key,sub_domain,GitHub_user,GitHub_repo,self.route_destiny_response_label.text(),IDs,"dwc_files")
qr_tools_class.qr_manager()
if showroom_option_answer==True:
qr_tools_class=qr_tools(api_key,sub_domain,GitHub_user,GitHub_repo,self.route_destiny_response_label.text(),IDs,"invited")
qr_tools_class.qr_manager()
else:
pass
refreshdatabase().df_to_csv(full_df,self.DestinyPathway) #save full_df to a csv
class StatsButton(qtw.QWidget,Ui_StatsPopButton):
def __init__(self,DestinyPathWay,stat_df=pd.DataFrame()):
"""MainWindow Constructor"""
super().__init__()
self.setupUi(self)
self.setWindowTitle("Darwin Connect")
# Var Definitions
self.DestinyPathway=DestinyPathWay
self.stat_df=stat_df
#Read df
self.stat_df= | pd.read_csv(f"{DestinyPathWay}/csv/full_df.csv",header=0,sep=',') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 13:30:31 2020
@author: User
"""
import sys
import datetime as dt
from collections import Counter
import pprint
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# import os
from platform import system
import glob
import cycler
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from bs4 import BeautifulSoup
import re
from scipy.stats import linregress
# from sklearn import linear_model
import scipy.signal
import itertools
from itertools import chain, repeat
import logging
import datetime as dt
from pathlib import Path
# import h5py
from multiprocessing import Pool, cpu_count
# import timeit
# import time
matplotlib.rcParams.update({"font.size": 16})
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = "Helvetica"
plt.rcParams["axes.edgecolor"] = "#333F4B"
plt.rcParams["xtick.color"] = "#333F4B"
plt.rcParams["ytick.color"] = "#333F4B"
try:
import statsmodels.formula.api as smf
import statsmodels.api as sm
import seaborn as sns
except Exception as e:
print("No modules: %s" % e)
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from file_py_helper.PostChar import (
SampleSelection,
Characterization_TypeSetting,
SampleCodesChar,
)
if __name__ == "__main__":
print(f"Package: {__package__}, File: {__file__}")
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.experiments.EIS.models import Model_Collection
import post_helper
import merger
# import EC
# sys.path.append(list(FH_path.rglob('*.py')))
# import FH_path.joinpath('FindExpFolder.py')
# import FindExpFolder.py
# from FileHelper import FindExpFolder
# from FindExpFolder import *
# from .experiments import EIS
# from .runEC import run_PAR_DW
from elchempy.runEC.EC_logging_config import start_logging
# logger = start_logging(__name__)
else:
# print('\n\n***** run_PAR_DW *****')
print(f"File: {__file__}, Name:{__name__}, Package:{__package__}")
# FH_path = Path(__file__).parent.parent.parent
# sys.path.append(str(FH_path))
# import FileHelper
from elchempy.main_run_PAR_DW import ECRunOVV
from elchempy.indexer.prepare_input import CleanUpCrew
from elchempy.runEC.EC_logging_config import start_logging
from elchempy.PostEC import post_helper, merger
from elchempy.experiments.EIS.models import Model_Collection
# logger = start_logging(__name__)
_logger = logging.getLogger(__name__)
_logger.setLevel(20)
EvRHE = "E_AppV_RHE"
class PostEC:
AllColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"E_Applied_VRHE",
"j A/cm2",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
DropColls = [
"Unnamed: 0",
"Segment #",
"Point #",
"E(V)",
"I(A)",
"Elapsed Time(s)",
"Current Range",
"Status",
"E Applied(V)",
"Frequency(Hz)",
"Z Real",
"Z Imag",
"ActionId",
"AC Amplitude",
"RHE_OCP",
"E_AppV_RHE",
"jmAcm-2",
"jcorr",
"Gas",
"EXP",
"Electrode",
"j_ring",
"RPM",
"Comment",
"Measured_OCP",
"pH",
"Electrolyte",
"ScanRate_calc",
"SampleID",
"File",
"BaseName",
"hash",
"Instrument",
"DATE",
"EvRHE_diff",
"DestFile",
"Sweep_Type",
"Type",
"Cycle",
"DAC_V",
"Scanrate",
"ORR_scan",
"Jcorr",
"J_N2_scan",
"J_O2_diff",
"J_O2_diff_diff",
"Analysis_date",
"J_2nd_diff",
"Jkin_max",
"Jkin_min",
"E_onset",
"Diff_lim",
"E_half",
"I(A)_ring",
"I(A)_disk",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
KeepColls = [
"E_AppV_RHE",
"jmAcm-2",
"Jcorr",
"J_N2_scan",
"Jkin_max",
"Jkin_min",
"Frac_H2O2",
"J_ring",
"n_ORR",
]
# SampleCodes = FindExpFolder.LoadSampleCode()
# FindExpFolder('VERSASTAT').SampleCodeLst
# PostDestDir.mkdir(parents=True,exist_ok=True)
# ExpPARovv = EC_loadOVV()
# OnlyRecentMissingOVV = runEC.MainPrepareList()
# ExpPARovv = ExpPARovv.iloc[100:120]
OutParsID = pd.DataFrame()
# Go1, Go2, Go3 = True, False, False
# Go1, Go2, Go3 = False, True, False
Go1, Go2, Go3 = False, False, True
# KL_coeff = KL_coefficients()
EvRHE_List = [
0,
0.1,
0.2,
0.3,
0.4,
0.45,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.9,
1,
]
def __init__(self):
self.DestDir = FindExpFolder("VERSASTAT").PostDir
@staticmethod
def StartLogging(level_log="INFO"):
# level_log = kwargs['level']
log_fn = FindExpFolder("VERSASTAT").PostDir.joinpath("PostEC_logger.log")
logging.basicConfig(
filename=log_fn,
filemode="w",
level=level_log,
format="%(asctime)s %(levelname)s, %(lineno)d: %(message)s",
)
logging.warning("Started logging for PostEC script...")
def applyParallel(dfGrouped, func):
with Pool(cpu_count() - 1) as p:
ret_list = p.map(func, [group for name, group in dfGrouped])
return ret_list
def check_status(file, verbose=False):
"""Check status will return (status,extra) of filename"""
PAR_file_test = Path(str(file)).stem
match = [
re.search("(?<!VERS|Vers)(AST|postAST|pAST)", str(a))
for a in PAR_file_test.split("_")
]
if any(match):
status = "EoL"
extra = [
a
for a in PAR_file_test.split("_")
if [i for i in match if i][0][0] in a
]
if verbose:
print(file, status, *extra)
return status, extra[0]
# if any([re.search(r'', i) for i in str(Path(str(file)).stem.split('_'))]):
else:
return "BoL", 0
# status =
# extra = [0]
# return status,extra
def postEC_Status(files, verbose=False):
# files = ['N2_HER_1500rpm_JOS6_pAST-sHA_285_#3_Disc_Parstat']
if len(files) > 1:
status_lst, extra_lst = [], []
for file in files:
status, extra = PostEC.check_status(file)
status_lst.append(status)
extra_lst.append(extra)
return status_lst, extra_lst
else:
return PostEC.check_status(files)
def OLD_PostOrganizeFolders(TakeRecentList=True):
postOVV = []
PostDestDir = FindExpFolder("VERSASTAT").DestDir.joinpath("PostEC")
PAR_version = FileOperations.version
RunOVV_fn_opts = list(
FindExpFolder("VERSASTAT").DestDir.rglob(
"RunOVV_v{0}.xlsx".format(PAR_version)
)
)
RunOVV_fn = [i for i in RunOVV_fn_opts if not "_Conflict" in i.stem][0]
if RunOVV_fn.is_file() and TakeRecentList == True:
OvvFromFile = pd.read_excel(RunOVV_fn, index_col=[0])
status, extra = PostEC.postEC_Status(OvvFromFile.PAR_file.values)
OvvFromFile = OvvFromFile.assign(
**{
"Date_PAR_EXP": OvvFromFile.PAR_date - OvvFromFile.EXP_date,
"Status": status,
"Extra": extra,
}
)
OnlyRecentMissingOVV = OvvFromFile
# OvvFromFile['Date_PAR_EXP'] = OvvFromFile.PAR_date-OvvFromFile.EXP_date
# OvvFromFile['Status'] = OvvFromFile.PAR_file.values
print("EC OVV loaded from file:{0}".format(RunOVV_fn))
OnlyRecentMissingOVV = FileOperations.ChangeRoot_DF(
OnlyRecentMissingOVV, ["Dest_dir", "EXP_dir", "PAR_file"]
)
# CS_parts_PDD = FileOperations.find_CS_parts(PostDestDir)
# CS_parts_pOVV = FileOperations.find_CS_parts(OnlyRecentMissingOVV.Dest_dir.iloc[0])
# chLst =[]
# if CS_parts_PDD[0] != CS_parts_pOVV[0]:
# chLst = [CS_parts_PDD[0].joinpath(FileOperations.find_CS_parts(i)[1]) for i in OnlyRecentMissingOVV.Dest_dir.values]
# OnlyRecentMissingOVV['Dest_dir'] = chLst
# else:
# pass
postOVVlst, outLst = [], []
postOVVcols = [
"DestFilename",
"SampleID",
"Status",
"Status_extra",
"Electrolyte",
"Gas",
"RPM",
"Scanrate",
"EXP_date",
"Type_Exp",
"SourceFilename",
"Exp_dir",
]
# postOVVout = PostEC.FromListgrp(group)
# postOVVlst = PostEC.applyParallel(OnlyRecentMissingOVV.groupby('Dest_dir'),PostEC.FromListgrp)
# postOVVlst = [outLst.append(PostEC.FromListgrp(i)) for i in OnlyRecentMissingOVV.groupby('Dest_dir')]
# for i in OnlyRecentMissingOVV.groupby('Dest_dir'):
# PostEC.FromListgrp(i)
# try:
# postOVVout = pd.DataFrame(postOVVlst,columns=)
# except Exception as e:
# postOVVout = pd.DataFrame(postOVVlst)
# for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir']):
# PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])
# pass
# postOVVlst = [outLst.append(PostEC.FromListgrp(n,gr.EXP_dir.unique()[0])) for n,gr in OnlyRecentMissingOVV.groupby(by=['Dest_dir'])]
postOVVout = pd.concat(
[pd.DataFrame(i, columns=postOVVcols) for i in outLst],
sort=False,
ignore_index=True,
)
postOVVout.to_excel(PostDestDir.joinpath("postEC_Organized.xlsx"))
return postOVVout
class EnterExitLog:
def __init__(self, funcName):
self.funcName = funcName
def __enter__(self):
_logger.info(f"Started: {self.funcName}")
self.init_time = dt.datetime.now()
return self
def __exit__(self, type, value, tb):
self.end_time = dt.datetime.now()
self.duration = self.end_time - self.init_time
_logger.info(f"Finished: {self.funcName} in {self.duration} seconds")
def func_timer_decorator(func):
def func_wrapper(*args, **kwargs):
with EnterExitLog(func.__name__):
return func(*args, **kwargs)
return func_wrapper
def get_daily_pickle(exp_type=""):
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(daily_pkl_options, key=lambda x: x.stat().st_ctime)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
}
)
if "EIS" in exp_type:
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_BRUTE_{system()}.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_RAW_WB_{system()}.pkl.compress"
),
}
)
return _result
def _collect_test():
tt = CollectLoadPars(load_type="fast")
class CollectLoadPars:
def __init__(self, load_type="fast"):
self.load_type = load_type
self.load_pars()
self.collect_dict()
def load_pars(self):
_BaseLoad = BaseLoadPars()
_kws = {"EC_index": _BaseLoad.EC_index, "SampleCodes": _BaseLoad.SampleCodes}
if "fast" in self.load_type:
_kws.update(**{"reload": False, "reload_raw": False})
self.EIS_load = EIS_LoadPars(**_kws)
self.ORR_load = ORR_LoadPars(**_kws)
self.N2_load = N2_LoadPars(**_kws)
def collect_dict(self):
_load_attrs = [i for i in self.__dict__.keys() if i.endswith("_load")]
_collect = {}
for _load_pars in _load_attrs:
_pars_name = f'{_load_pars.split("_")[0]}_pars'
if hasattr(getattr(self, _load_pars), _pars_name):
_pars = getattr(getattr(self, _load_pars), _pars_name)
_collect.update({_pars_name: _pars})
self.pars_collection = _collect
class BaseLoadPars:
_required_funcs = [
"make_raw_pars_from_scratch",
"edit_raw_columns",
"search_pars_files",
"read_in_pars_files",
"extra_stuff_delegator",
]
def __init__(
self,
EC_index=pd.DataFrame(),
SampleCodes=pd.DataFrame(),
exp_type="",
reload=False,
reload_raw=False,
):
self.exp_type = exp_type
self._auto_set_exp_type()
self.EC_index = EC_index
self.SampleCodes = SampleCodes
self._check_class_req_functions()
self.check_EC_index()
self.set_OVV_exp_type()
self._reload = reload
self._reload_raw = reload_raw
self.get_daily_pickle()
if self.exp_type:
self.load_delegator()
def _auto_set_exp_type(self):
_cls_name = self.__class__.__name__
if "_" in _cls_name:
_cls_exp_type = _cls_name.split("_")[0]
_exp_type = f"{_cls_exp_type}_pars"
self.exp_type = _exp_type
def check_EC_index(self):
if self.EC_index.empty:
EC_index = ECRunOVV(load=1).EC_index
EC_index = FileOperations.ChangeRoot_DF(EC_index, [])
EC_index.PAR_file = EC_index.PAR_file.astype(str)
EC_index["Loading_cm2"] = EC_index["Loading_cm2"].round(3)
self.EC_index = EC_index
if self.SampleCodes.empty:
SampleCodes = FindExpFolder().LoadSampleCode()
self.SampleCodes = SampleCodes
# SampleCodesChar().load
def set_OVV_exp_type(self):
if not self.EC_index.empty and self.exp_type:
PAR_exp_uniq = self.EC_index.PAR_exp.unique()
PAR_match = [
parexp
for parexp in PAR_exp_uniq
if self.exp_type.split("_")[0] in parexp
]
self.exp_type_match = PAR_match
# if PAR_match:
EC_index_exp = self.EC_index.loc[self.EC_index.PAR_exp.isin(PAR_match)]
self.EC_index_exp = EC_index_exp
if EC_index_exp.empty:
_logger.error(f'set_OVV_exp_type "{self.__class__.__name__}" empty')
self.EC_index_exp_destdirs = EC_index_exp.Dest_dir.unique()
def get_daily_pickle(self):
exp_type = self.exp_type
today = dt.datetime.now().date()
_result = {"today": today}
if exp_type:
daily_pickle_path = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}.pkl.compress"
)
daily_pkl_options = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}.pkl.compress"
)
)
daily_pkl_options = sorted(
daily_pkl_options, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path": daily_pickle_path,
"_exists": daily_pickle_path.exists(),
"daily_options": daily_pkl_options,
}
)
if not daily_pkl_options and not self._reload_raw:
self._reload_raw = True
daily_pickle_path_RAW = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW.pkl.compress"
)
_pickle_path_RAW_read_in = FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{exp_type}_{system()}_RAW_read_in.pkl.compress"
)
daily_pkl_options_RAW = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW.pkl.compress"
)
)
daily_pkl_options_RAW = sorted(
daily_pkl_options_RAW, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_RAW": daily_pickle_path_RAW,
"_raw_exists": daily_pickle_path_RAW.exists(),
"daily_options_RAW": daily_pkl_options_RAW,
"pkl_path_RAW_read_in": _pickle_path_RAW_read_in,
}
)
if "EIS" in exp_type:
daily_pkl_options_RAW_WB = list(
FindExpFolder("VERSASTAT").PostDir.rglob(
f"*_{exp_type}_{system()}_RAW_WB.pkl.compress"
)
)
daily_pkl_options_RAW_WB = sorted(
daily_pkl_options_RAW_WB, key=lambda x: x.stat().st_ctime
)
_result.update(
{
"daily_path_BRUTE": FindExpFolder("VERSASTAT").PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_BRUTE.pkl.compress"
),
"daily_path_RAW_WB": FindExpFolder(
"VERSASTAT"
).PostDir.joinpath(
f"{today:%Y-%m-%d}_{exp_type}_{system()}_RAW_WB.pkl.compress"
),
"daily_options_RAW_WB": daily_pkl_options_RAW_WB,
}
)
self.daily_pickle_path = _result
def load_delegator(self):
setattr(self, self.exp_type, pd.DataFrame())
if self._reload:
if self._reload_raw:
self.make_raw_pars_from_scratch()
else:
self.read_in_daily_raw()
if hasattr(self, "edit_raw_columns"):
try:
self.edit_raw_columns()
except Exception as e:
_logger.warning(
f'edit_raw_columns in load_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
self.save_daily_pars()
else:
self.read_in_daily_pars()
try:
self.extra_stuff_delegator()
except Exception as e:
_logger.warning(
f'extra_stuff_delegator "{self.__class__.__name__}" {self.exp_type} failed because {e}'
)
def _check_class_req_functions(self):
for _f in self._required_funcs:
if not hasattr(self, _f) and "BaseLoadPars" not in self.__class__.__name__:
_logger.warning(
f'Class "{self.__class__.__name__}" is missing required func: "{_f}"'
)
def save_daily_pars(self):
pars = getattr(self, self.exp_type)
pars.to_pickle(self.daily_pickle_path["daily_path"])
_logger.info(
f'{self.exp_type} len({len(pars)}) OVV to daily pickle: {self.daily_pickle_path.get("daily_path")}'
)
def read_in_daily_pars(self):
if self.daily_pickle_path.get("daily_options"):
_pars_fp = self.daily_pickle_path.get("daily_options")[-1]
_logger.info(
f"start read_in_daily_pars {self.exp_type} pars OVV from daily {_pars_fp} "
)
_pars = pd.read_pickle(_pars_fp)
try:
_pars = FileOperations.ChangeRoot_DF(_pars, [], coltype="string")
setattr(self, self.exp_type, _pars)
_logger.info(f"Loaded {self.exp_type} pars OVV from daily {_pars_fp} ")
except Exception as e:
_pars = pd.DataFrame()
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp} {e} "
)
else:
_pars = pd.DataFrame()
_pars_fp = "options empty list"
if _pars.empty:
_logger.error(
f" ERROR in Loaded {self.exp_type} pars OVV from daily {_pars_fp}: empty "
)
def reload_raw_df_delegator(self):
_raw_read_fp = self.daily_pickle_path.get("pkl_path_RAW_read_in")
if _raw_read_fp.exists() and not (self._reload or self._reload_raw):
_pars_RAW_read_in = pd.read_pickle(_raw_read_fp)
setattr(self, f"{self.exp_type}_RAW", _pars_RAW_read_in)
else:
self.generate_raw_df()
self.reload_raw_df()
_pars_RAW_read_in = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW_read_in.to_pickle(_raw_read_fp)
def read_in_daily_raw(self):
_raw_fp = self.daily_pickle_path.get("daily_options_RAW")[-1]
_pars_RAW = pd.read_pickle(_raw_fp)
_pars_RAW.sort_values("source_delta_mtime", inplace=True)
if not "level_0" in _pars_RAW.columns:
_pars_RAW = _pars_RAW.reset_index()
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
_logger.info(f"Loaded raw df {self.exp_type} from daily {_raw_fp} ")
def save_daily_raw(self):
_pars_RAW = getattr(self, f"{self.exp_type}_RAW")
_pars_RAW.to_pickle(self.daily_pickle_path.get("daily_path_RAW"))
_logger.info(
f'{self.exp_type} OVV to daily pickle: {self.daily_pickle_path.get("daily_path_RAW")}'
)
def set_gen_raw_fls(self):
_par_files = [
list(self.search_pars_files(d)) for d in self.EC_index_exp_destdirs
]
self._par_files = _par_files
if not _par_files:
_logger.warning(f"{self.exp_type} set_gen_raw_fls: list empty ")
self._par_fls_gen = (a for i in self._par_files for a in i)
@func_timer_decorator
def generate_raw_df(self):
if not hasattr(self, "_par_fls_gen"):
self.set_gen_raw_fls()
_pars_lst = list(self.read_in_pars_files(self._par_fls_gen))
try:
_pars_RAW = pd.concat(_pars_lst, sort=False)
except Exception as e:
_pars_RAW = pd.DataFrame()
_logger.warning(f"{self.exp_type} generate_raw_df: {e}")
setattr(self, f"{self.exp_type}_RAW", _pars_RAW)
@staticmethod
def get_source_meta(filepath):
i = filepath
_source_mtime = dt.datetime.fromtimestamp(i.stat().st_mtime)
_delta_mtime = dt.datetime.now() - _source_mtime
_meta_res = {
"sourceFilename": i,
"source_mtime": _source_mtime,
"source_delta_mtime": _delta_mtime,
"sourcebasename": i.stem,
}
return _meta_res
def extra_stuff_delegator(self):
_extra_funcs = [i for i in self.__dict__.keys() if i.startswith("_extra")]
for _func in _extra_funcs:
try:
func = getattr(self, _func)
func()
# self._extra_plotting()
except Exception as e:
_logger.info(
f"{self.__class__.__name__} Extra stuff failed because {e}"
)
def _testing():
tt = EIS_LoadPars(reload=False, reload_raw=False)
tt._reload_raw
self = tt
self.load_delegator()
self.make_raw_pars_from_scratch()
class EIS_LoadPars(BaseLoadPars):
col_names = ["File_SpecFit", "File_SpecRaw", "PAR_file"]
def __init__(
self,
EC_index= | pd.DataFrame() | pandas.DataFrame |
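The EnterExitLog context manager and func_timer_decorator in the module above wrap a call with start/finish log lines and its duration. A standalone sketch of the same pattern, with simplified names and the stock logging module instead of the project logger:
import datetime as dt
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("timer_sketch")

class Timed:
    def __init__(self, name):
        self.name = name
    def __enter__(self):
        self.t0 = dt.datetime.now()
        log.info("Started: %s", self.name)
        return self
    def __exit__(self, exc_type, exc, tb):
        log.info("Finished: %s in %s", self.name, dt.datetime.now() - self.t0)

def timed(func):
    # decorator: run the wrapped function inside the timing context
    def wrapper(*args, **kwargs):
        with Timed(func.__name__):
            return func(*args, **kwargs)
    return wrapper

@timed
def build_frames():
    return sum(range(1_000_000))

build_frames()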
from nltk import ngrams
import collections
import string
import tika
tika.initVM()
import re
from tika import parser
import pandas as pd
import PyPDF2
import os
import shutil
import ast
import numpy as np
import jellyfish
from fuzzywuzzy import fuzz
import dill
import click
from report_pattern_analysis import rec_separate
# ========= Data structures, initializations and hyperparameters
global PREP, PUNC, WORD, DIGI, UNIT
global prepos, punc, units
global threshold, current_document, counter
global learned_patterns, all_patterns, current_patterns, interesting_patterns, fuzzy_patterns
PREP='Prep~'
PUNC='Punc~'
WORD='Word~'
DIGI='Digi~'
UNIT='Unit~'
# ========== utility functions
def remove_files(file_paths):
for file_path in file_paths:
if os.path.exists(file_path):
os.remove(file_path)
def savemodel(model,outfile):
with open(outfile, 'wb') as output:
dill.dump(model, output)
return ''
def loadmodel(infile):
model=''
with open(infile, 'rb') as inp:
model = dill.load(inp)
return model
def ispunc(string):
if re.match('[^a-zA-Z\d]',string):
return True
return False
def break_natural_boundaries(string):
stringbreak=[]
if len(string.split(' ')) > 1:
stringbreak = string.split(' ')
else:
# spl = '[\.\,|\%|\$|\^|\*|\@|\!|\_|\-|\(|\)|\:|\;|\'|\"|\{|\}|\[|\]|]'
alpha = '[A-z]'
num = '\d'
spl='[^A-z\d]'
matchindex = set()
matchindex.update(set(m.start() for m in re.finditer(num + alpha, string)))
matchindex.update(set(m.start() for m in re.finditer(alpha + num, string)))
matchindex.update(set(m.start() for m in re.finditer(spl + alpha, string)))
matchindex.update(set(m.start() for m in re.finditer(alpha + spl, string)))
matchindex.update(set(m.start() for m in re.finditer(spl + num, string)))
matchindex.update(set(m.start() for m in re.finditer(num + spl, string)))
matchindex.update(set(m.start() for m in re.finditer(spl + spl, string)))
matchindex.add(len(string)-1)
matchindex = sorted(matchindex)
start = 0
for i in matchindex:
end = i
stringbreak.append(string[start:end + 1])
start = i+1
return stringbreak
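# Illustrative examples of the splitting behaviour above:
#   break_natural_boundaries('500psi')  -> ['500', 'psi']
#   break_natural_boundaries('8.5ppa')  -> ['8', '.', '5', 'ppa']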
def break_and_split(arr):
new_arr=[]
for token in arr:
new_arr.extend(break_natural_boundaries(token))
return new_arr
def split_pdf_pages(input_pdf_path, target_dir, fname_fmt=u"{num_page:04d}.pdf"):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if 'doc' in input_pdf_path:
shutil.copyfile(input_pdf_path, (target_dir + "/delete"))
return
with open(input_pdf_path, "rb") as input_stream:
input_pdf = PyPDF2.PdfFileReader(input_stream)
if input_pdf.flattenedPages is None:
# flatten the file using getNumPages()
input_pdf.getNumPages() # or call input_pdf._flatten()
for num_page, page in enumerate(input_pdf.flattenedPages):
output = PyPDF2.PdfFileWriter()
output.addPage(page)
file_name = os.path.join(target_dir, fname_fmt.format(num_page=num_page))
with open(file_name, "wb") as output_stream:
output.write(output_stream)
def levenshtein_similarity(s, t):
""" Levenshtein Similarity """
Ns = len(s);
Nt = len(t);
lev_sim = 1.0 - (jellyfish.levenshtein_distance(s, t)) / float(max(Ns, Nt))
return lev_sim
def word_similarity(s,t, type=''):
if type=='leven':
return levenshtein_similarity(s, t)
else:
return float(fuzz.ratio(s.upper(), t.upper()))/100
# ========== state changing functions
def find_entites(hpattern, mask=[]):
'''
aggregate the tokens that are next to each other as an entity. Finds multiple entities in a single pattern.
Uses the mask to discount the masked tokens.
:param hpattern:
:param mask:
:return:
'''
if len(mask) == 0:
mask = list(np.full(len(hpattern), True))
entities=[]
entity=''
dummied_hpattern=list(hpattern)
dummied_hpattern.append(('~', '~', '~'))
dummied_hpattern=tuple(dummied_hpattern)
mask.append(True)
for token, select in zip(dummied_hpattern, mask):
if not select:
continue
if token[2]==WORD:
entity+=' '+token[0]
else:
if entity!='':
entities.append(entity)
entity = ''
return entities
def find_units(hpattern, mask=[]):
'''
find the units in the pattern
:param hpattern:
:param mask:
:return:
'''
if len(mask) == 0:
mask = list(np.full(len(hpattern), True))
units=[]
for token, select in zip(hpattern,mask):
if not select:
continue
if len(token)>=4 and token[3]==UNIT:
units.append(token[0])
return units
def find_values(instance, hpattern, mask=[]):
'''
find the values in the pattern
:param instance:
:param hpattern:
:param mask:
:return:
'''
values=[]
if len(mask)==0:
mask=list(np.full(len(hpattern),True))
for token_inst,token_patt,select in zip(instance, hpattern, mask):
if not select:
continue
if token_patt[2]==DIGI:
values.append(token_inst)
return values
def find_exact_patterns(hpattern):
'''
finds the hpatterns that are exact to the given hpattern
look by base patterns, as they don't have the variable/value
:param hpattern:
:return:
'''
global current_patterns
exact_pattern_ids=[]
base_pattern=str(get_base_pattern(ast.literal_eval(hpattern)))
if base_pattern in list(current_patterns['base_pattern']):
exact_pattern_ids.append(list(current_patterns[current_patterns['base_pattern']==base_pattern]['pattern_id'])[0])
return exact_pattern_ids
def find_close_patterns(hpattern):
'''
finds the hpatterns that are closest to the given hpattern
:param hpattern:
:return:
'''
global current_patterns
close_pattern_ids=[]
hpattern=ast.literal_eval(hpattern)
entities=find_entites(hpattern)
units=find_units(hpattern)
close_patterns=[]
for _, row in current_patterns.iterrows():
confidence_flag_entity = 0
confidence_flag_unit = 0
confidence=0 # todo: give the best score here; will help decide the rank
hpattern_iter=ast.literal_eval(str(row['hpattern']))
mask = str(row['mask'])
if mask == '':
mask = []
else:
mask = ast.literal_eval(str(row['mask']))
entities_iter=find_entites(hpattern_iter,mask)
units_iter=find_units(hpattern_iter,mask)
for entity_iter in entities_iter:
for entity in entities:
if word_similarity(entity,entity_iter)>0.5:
confidence_flag_entity=1
for unit_iter in units_iter:
for unit in units:
if unit.lower()==unit_iter.lower():
confidence_flag_unit=1
if confidence_flag_entity==1 or confidence_flag_unit==1:
close_patterns.append((row['pattern_id'],confidence_flag_entity,confidence_flag_unit))
# todo: here rank the patterns according to confidence and return the top n
for conf in close_patterns:
close_pattern_ids.append(conf[0])
return close_pattern_ids
def find_far_patterns(entity_name, aliases=[]):
'''
finds the patterns that have similar entity names
:param entity_name:
:return:
'''
global current_patterns
far_pattern_ids=[]
aliases.append(entity_name)
for _, row in current_patterns.iterrows():
mask = str(row['mask'])
if mask == '':
mask = []
else:
mask=ast.literal_eval(str(row['mask']))
hpattern_iter = ast.literal_eval(str(row['hpattern']))
entities_iter = find_entites(hpattern_iter, mask)
for entity_iter in entities_iter:
for alias in aliases:
if word_similarity(alias, entity_iter) > 0.5:
far_pattern_ids.append(row['pattern_id'])
return far_pattern_ids
def matcher_bo_entity(entity_name,seed_aliases):
'''
if the entity name is already present in the learned_patterns, it gets the exact pattern. Then checks if it is present in the current_patterns.
if present then just returns the exact pattern. If not, then finds the closest pattern in current_pattern.
:param entity_name:
:return:
'''
global learned_patterns
global all_patterns
pre_learned_patterns=[]
pre_learned_masks=[]
exact_pattern_ids=[]
exact_masks = {}
close_pattern_ids=[]
far_pattern_ids=[]
# check if the any patterns for the entity have already been identified
if entity_name in list(learned_patterns['entity_name']):
# seed_aliases=str(list(learned_patterns[learned_patterns['entity_name'] == entity_name]['seed_aliases'])[0])
# seed_aliases=seed_aliases.split(',')
pattern_ids=str(list(learned_patterns[learned_patterns['entity_name'] == entity_name]['pattern_ids'])[0])
if pattern_ids!='':
pattern_ids=ast.literal_eval(pattern_ids)
for pattern_id in pattern_ids:
# get the pattern using the id
pre_learned_patterns.append(str(list(all_patterns[all_patterns['pattern_id']==pattern_id]['hpattern'])[0]))
pre_learned_mask=str(list(all_patterns[all_patterns['pattern_id'] == pattern_id]['mask'])[0])
if pre_learned_mask!='':
pre_learned_masks.append(ast.literal_eval(pre_learned_mask))
else:
pre_learned_masks.append([])
# find suitable current patterns
if len(pre_learned_patterns)!=0:
print('We have seen this entity before! Let us find if the exact pattens work...')
for hpattern, mask in zip(pre_learned_patterns, pre_learned_masks):
# check if the exact pattern is present in the current patterns
exact_hpatterns_found=find_exact_patterns(hpattern)
exact_pattern_ids.extend(exact_hpatterns_found)
for pattern_id in exact_hpatterns_found:
exact_masks[pattern_id]=mask
if len(exact_pattern_ids)>0:
print('looks like the entity is present in the same form! Great!')
else:
print('finding patterns closer to learned patterns ...')
for hpattern in pre_learned_patterns:
# find the closest patterns
close_pattern_ids.extend(find_close_patterns(hpattern))
else:
# find the patterns that have similar entity name
print('looks like nothing is close enough is there! Let us just find the closest seeming entity by the name!')
far_pattern_ids.extend(find_far_patterns(entity_name,aliases=seed_aliases))
return exact_pattern_ids, close_pattern_ids, far_pattern_ids, exact_masks
def matcher_bo_value(entity_value):
'''
searches for all the patterns in current_pattern that have the particular value associated with them
:param entity_value:
:return:
'''
global current_patterns
exact_pattern_ids=[]
instance_samples=[] # one instance per pattern
for _, row in current_patterns.iterrows():
instances=ast.literal_eval(str(row['instances']))
for instance in instances:
if entity_value in instance:
exact_pattern_ids.append(row['pattern_id'])
instance_samples.append(instance)
break
return exact_pattern_ids, instance_samples
def parse_document(file_path):
parsed_text=[]
# create a dir for dumping split pdfs
if os.path.exists('./temp'):
shutil.rmtree('./temp/')
else:
os.mkdir('./temp')
split_pdf_pages(file_path, 'temp')
for pdf_page in os.listdir('temp'):
# print('processing page: ',pdf_page)
parsed = parser.from_file(os.path.join('temp', pdf_page))
try:
pdftext = parsed['content']
except Exception:
print("Could not read file.")
pdftext=''
parsed_text.append(pdftext)
return parsed_text
def filter1(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
global threshold
# if the pattern occurs in the document less than the threshold then return false
if int(row['num_instances'])>threshold:
return True
return False
def filter2(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
pattern=ast.literal_eval(str(row['hpattern']))
# if the first token is preposition/pronoun or punctuation then return false
if pattern[0][2] ==PREP or pattern[0][2] ==PUNC:
return False
return True
def filter3(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
pattern=ast.literal_eval(str(row['hpattern']))
for token in pattern:
# if atleast one entity/unit found, it is okay
if token[2] == WORD:
return True
return False
def filter4(row):
'''
Returns True if the pattern satisfies a certain criteria, else False
:param row:
:return:
'''
pattern=ast.literal_eval(str(row['hpattern']))
for token in pattern:
# if atleast one number found, it is okay
if token[2] == DIGI:
return True
return False
def apply_filters(fltr):
'''
Apply filters to remove 'irrelevant' current patterns: see filter1 impl
:param: fltr: a function
:return:
'''
global current_patterns
current_patterns=current_patterns[current_patterns.apply(lambda x: fltr(x), axis=1)]
print('FILTERED! now number of patterns: ', len(current_patterns))
def getID():
global counter
counter+=1
return counter
def get_base_pattern(hpattern):
'''
takes the second level of an hpattern (non variable tokens)
:param hpattern:
:return:
'''
base_pattern=[]
for patt in hpattern:
base_pattern.append(patt[1])
return tuple(base_pattern)
def create_hpattern(instance):
'''
creates a hierarchy of 'denominations/classes' for each base pattern
:param instance:
:return: base_pattern, h_pattern
'''
global punc
global prepos
global units
signature = []
for token in instance:
if token in prepos:
signature.append((token, token, PREP))
elif token.isnumeric():
signature.append((token, DIGI, DIGI))
elif token.isalpha():
sign=[token, token, WORD]
if token.lower() in units:
sign.append(UNIT)
signature.append(tuple(sign))
elif ispunc(token):
signature.append((token, token, PUNC))
else:
signature.append((token))
return tuple(signature)
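# Illustrative example of the signatures produced above, given the prepos/punc/units globals:
#   create_hpattern(['pressure', '=', '500', 'psi'])
#   -> (('pressure', 'pressure', 'Word~'), ('=', '=', 'Punc~'),
#       ('500', 'Digi~', 'Digi~'), ('psi', 'psi', 'Word~', 'Unit~'))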
def create_patterns_per_doc(parsed_text):
'''
:param parsed_text: it should be a list of texts. One item/text for every page in the document.
:return:
'''
global current_patterns
global current_document
instance_order_temp=0
all_hpatterns=[]
all_base_patterns=[]
all_instances = []
all_instances_orders = []
for page in parsed_text:
page_hpatterns=[]
page_base_patterns=[]
page_instances = []
for line in page.split('\n'): # pattern analysis is done based on each line
# create chunks by dividing on commas+space, period+space (and multi-space??) so that patterns don't span beyond them
# chunks=re.split(', |\. |\s{2,}',line)
chunks = re.split(', |\. |;', line.lower())
# print(line, chunks)
# remove commas from numbers (8,643), give valid spacing around #, = and @
# tokenize everything based on spaces/tabs
# creates a list(chunk) of lists(tokens): [[token,token,token],[token,token]]
chunks = [
chunk.replace(",", "").replace("=", " = ").replace("@", " @ ").replace("#", " # ").replace("$", " $ ").
replace("°", " ° ").replace("%", " % ").replace("\"", " \" ").replace("'", " ' ").replace(":",
" : ").split()
for chunk in chunks]
# separate the tokens further using the natural seperation boundaries
chunks = [break_and_split(chunk) for chunk in chunks]
chunks_base_patterns=[]
chunks_hpatterns=[]
for chunk in chunks:
# convert each chunk to base pattern and hpattern
hpattern=create_hpattern(chunk)
base_pattern=get_base_pattern(hpattern)
chunks_base_patterns.append(base_pattern)
chunks_hpatterns.append(hpattern)
# create n-grams
n_gram_range = (3, 4, 5, 6, 7)
for n in n_gram_range:
all_grams_base_patterns = list(map(lambda x: list(ngrams(x, n)), chunks_base_patterns))
all_grams_hpatterns = list(map(lambda x: list(ngrams(x, n)), chunks_hpatterns))
all_grams = list(map(lambda x: list(ngrams(x, n)), chunks))
# flatten the nested list
all_grams_base_patterns = [item for sublist in all_grams_base_patterns for item in sublist]
all_grams_hpatterns = [item for sublist in all_grams_hpatterns for item in sublist]
all_grams = [item for sublist in all_grams for item in sublist]
page_base_patterns.extend(all_grams_base_patterns)
page_hpatterns.extend(all_grams_hpatterns)
page_instances.extend(all_grams)
all_base_patterns.append(page_base_patterns)
all_hpatterns.append(page_hpatterns)
all_instances.append(page_instances)
all_instances_orders.append(list(range(instance_order_temp, instance_order_temp + len(page_instances))))
instance_order_temp+=len(page_instances)
all_page_numbers=[]
for indx, _ in enumerate(all_instances):
all_page_numbers.append(list(np.full(len(_),indx+1)))
all_base_patterns_flattened=[item for sublist in all_base_patterns for item in sublist]
all_hpatterns_flattened = [item for sublist in all_hpatterns for item in sublist]
all_instances_flattened = [item for sublist in all_instances for item in sublist]
all_page_numbers_flattened=[item for sublist in all_page_numbers for item in sublist]
all_instances_orders_flattened=[item for sublist in all_instances_orders for item in sublist]
counted_patterns = collections.Counter(all_base_patterns_flattened)
# ======= get the longest pattern with the same support (keeps only the superset, based on minsup criteria)
# todo: check if works correctly
filtered_patterns = {}
for pattern in counted_patterns.keys():
# create the ngrams/subsets of a set and check if they are already present, if so check minsup and delete
len_pattern = len(pattern)
filtered_patterns[pattern] = counted_patterns[pattern]
for i in range(1, len_pattern):
# create all size sub patterns/n-grams
subpatterns = list(ngrams(pattern, i))
for subpattern in subpatterns:
if subpattern in filtered_patterns.keys() and filtered_patterns[subpattern] == counted_patterns[pattern]:
# delete subpattern
# print('deleting',subpattern,', because: ', pattern, filtered_pattens[subpattern], counted[pattern])
filtered_patterns.pop(subpattern)
# ========== create data frame
# aggregate the instances based on base patterns
# create a mapping from base pattern to hpattern
aggregated_pattern_instance_mapping={}
aggregated_pattern_pagenumber_mapping={}
aggregated_pattern_order_mapping = {}
base_pattern_to_hpattern={}
for pattern, hpattern, instance, page_number, instance_order in zip(all_base_patterns_flattened, all_hpatterns_flattened, all_instances_flattened,all_page_numbers_flattened, all_instances_orders_flattened):
# aggregate
if pattern not in aggregated_pattern_instance_mapping.keys():
aggregated_pattern_instance_mapping[pattern]=[]
aggregated_pattern_pagenumber_mapping[pattern]=[]
aggregated_pattern_order_mapping[pattern]=[]
aggregated_pattern_instance_mapping[pattern].append(instance)
aggregated_pattern_pagenumber_mapping[pattern].append(page_number)
aggregated_pattern_order_mapping[pattern].append(instance_order)
# mapping
if pattern not in base_pattern_to_hpattern.keys():
base_pattern_to_hpattern[pattern]=hpattern
for pattern in aggregated_pattern_instance_mapping.keys():
if pattern in filtered_patterns:
pattern_id=getID()
current_patterns=current_patterns.append({'pattern_id':pattern_id,'base_pattern':str(pattern),'instances':str(aggregated_pattern_instance_mapping[pattern]),
'page_numbers':str(aggregated_pattern_pagenumber_mapping[pattern]),'instances_orders':str(aggregated_pattern_order_mapping[pattern]),'hpattern':str(base_pattern_to_hpattern[pattern]),'document_name':current_document,'num_instances':str(counted_patterns[pattern])}, ignore_index=True)
# ============= apply filters
# filter the patterns that have the number of instances below a certain threshold
apply_filters(filter1)
# remove the ones that start with a punctuation or preposition
apply_filters(filter2)
# remove the patterns that have only punctuations, prepositions and numbers
apply_filters(filter3)
# remove the ones that have no numbers
apply_filters(filter4)
current_patterns = current_patterns.replace(np.nan, '', regex=True)
current_patterns.to_csv('current_patterns.csv')
def find_interesting_patterns():
'''
using the list of other patterns, find the matching patterns from the current document
:param patterns:
:return:
'''
global interesting_patterns
def init(file_path, fresh=False):
'''
initialize and load all the relevant dataframes and datastructures
:param file_path
:param fresh : if True then initialize everything anew
:return:
'''
global prepos, punc, units
global threshold, current_document_path, counter
global learned_patterns, all_patterns, current_patterns, other_patterns, other_pattern_instances
prepos = ['aboard', 'about', 'above', 'across', 'after', 'against', 'along', 'amid', 'among', 'anti', 'around',
'as',
'at', 'before', 'behind', 'below', 'beneath', 'beside', 'besides', 'between', 'beyond', 'but', 'by',
'concerning', 'considering', 'despite', 'down', 'during', 'except', 'excepting', 'excluding', 'following',
'for', 'from', 'in', 'inside', 'into', 'like', 'minus', 'near', 'of', 'off', 'on', 'onto', 'opposite',
'outside',
'over', 'past', 'per', 'plus', 'regarding', 'round', 'save', 'since', 'than', 'through', 'to', 'toward',
'towards',
'under', 'underneath', 'unlike', 'until', 'up', 'upon', 'versus', 'via', 'with', 'within', 'without',
'and', 'or']
units = ['ft', 'gal', 'ppa', 'psi', 'lbs', 'lb', 'bpm', 'bbls', 'bbl', '\'', "\"", "'", "°", "$", 'hrs']
punc = set(string.punctuation)
if_seen_document_before=False
threshold = 6
# save state across documents
if os.path.exists('counter'):
counter=loadmodel('counter')
else:
counter = 0
print('counter',counter)
current_document_path = ''
global current_document
current_document = file_path.split('/')[-1]
# entity matchings for all the documents processed so far
if os.path.exists('learned_patterns.csv'):
learned_patterns = | pd.read_csv('learned_patterns.csv', index_col=0) | pandas.read_csv |
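A quick illustration of the n-gram windows (sizes 3-7) that create_patterns_per_doc slides over each tokenized chunk; the token list is made up.
from nltk import ngrams

tokens = ['casing', 'pressure', '=', '500', 'psi']
print(list(ngrams(tokens, 3)))
# [('casing', 'pressure', '='), ('pressure', '=', '500'), ('=', '500', 'psi')]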
import requests
import re
import bs4
import pandas as pd
import time
url = 'https://funddb.cn/tool/energy'
header={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"}
req = requests.get(url,headers=header)
html=req.content.decode("utf-8")
data_proto=[]
requests.status_codes
requests.adapters.DEFAULT_RETRIES = 1
header = {"Connection": "close",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"}
def get_data_proto(page):
for i in page:
print(i)
url = 'https://funddb.cn/tool/energy'
try:
req = requests.get(url, headers=header)
html = req.content.decode("utf-8")
# data = re.findall(""""DRZJLR":-?\d+\.?\d*""", html)
data_page = re.findall(r"\{[^{}]*\}", html)
data_proto.append(data_page)
except:
print("Connection refused by the server..")
print("Let me sleep for 5 seconds")
print("ZZzzzz...")
time.sleep(5)
print("Was a nice sleep, now let me continue...")
req = requests.get(url, headers=header)
html = req.content.decode("utf-8")
#data = re.findall(""""DRZJLR":-?\d+\.?\d*""", html)
data_page=re.findall(r"\{[^{}]*\}",html)
data_proto.append(data_page)
return data_proto
# # time=re.compile(""""LSZJLR":^[0-9]*$""")
# time=re.compile(r"\d+\.?\d*")
# time="LSZJLR"+"-?\d+\.?\d*"
# data=re.findall(""""DRZJLR":-?\d+\.?\d*""",html)
if __name__=='__main__':
aa=get_data_proto(range(1,3))
flag=1
res = {}
for i in aa:
for ii in i:
res0={}
print(ii)
print(re.findall('"DetailDate":"?\d*-?\d*-?\d*',ii))
date=re.findall('"DetailDate":"?\d*-?\d*-?\d*',ii)[0]
year=date[14:18]
month=date[19:21]
day=date[22:24]
money_flow_in=re.findall(""""DRZJLR":(?<=:).*?(?=,)""",ii)[0][9:]
money_deal=re.findall(""""DRCJJME":(?<=:).*?(?=,)""",ii)[0][10:]
money_rest_of_day=re.findall(""""DRYE":(?<=:).*?(?=,)""",ii)[0][7:]
money_deal_cumulant=re.findall(""""LSZJLR":(?<=:).*?(?=,)""",ii)[0][9:]
shanghai_index=re.findall(""""SSEChange":(?<=:).*?(?=,)""",ii)[0][12:]
deal_out = re.findall(""""MCCJE":(?<=:).*?(?=,)""", ii)[0][8:]
deal_in = re.findall(""""MRCJE":(?<=:).*?(?=,)""", ii)[0][8:]
shanghai_index_change_percent = re.findall(""""SSEChangePrecent":(?<=:).*?(?=})""", ii)[0][19:]
# shanghai_index_change_percent=re.findall(""""SSEChangePrecent":-?\d+\.?\d*""", ii)[0][19:]
stock_first=re.findall('"LCG":"(?<=").*?(?=")"', ii)[0][6:]
stock_first_change_percent=re.findall(""""LCGZDF":(?<=:).*?(?=,)""", ii)[0][9:]
stock_first_number=re.findall('"LCGCode":"(?<=").*?(?=")"', ii)[0][11:-1]
res0['年']=year
res0['月'] = month
res0['日']=day
res0['当日净流入']=float(money_flow_in)
res0['当日成交净买额']=float(money_deal)
res0['历史累积净买额']=float(money_deal_cumulant)
res0['当日余额'] = float(money_rest_of_day)
res0['上证指数'] = float(shanghai_index)
res0['卖出成交额'] = float(deal_out)
res0['买入成交额'] = float(deal_in)
res0['上证指数涨幅'] = float(shanghai_index_change_percent)
res0['领涨股'] =stock_first
res0['领涨股涨幅'] = float(stock_first_change_percent)
res0['领涨股代码'] = float(stock_first_number)
res[flag]=res0
flag=flag+1
zz= | pd.DataFrame(res) | pandas.DataFrame |
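The scraper above extracts field values from the embedded JSON-like text with lookbehind/lookahead pairs and then slices off the field-name prefix. A small sketch of that extraction on a made-up fragment:
import re

fragment = '{"DetailDate":"2021-06-01","DRZJLR":-12.3,"DRCJJME":4.56}'
match = re.findall(r'"DRZJLR":(?<=:).*?(?=,)', fragment)   # ['"DRZJLR":-12.3']
value = float(match[0][9:])                                # drop the 9-char '"DRZJLR":' prefix -> -12.3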
def GetRoutes(area):
query = 'select distinct Route from [dimensions].[wells] WITH (NOLOCK) where [Area] = \'' + area + '\''
return query
def GetWells(area, route):
query = 'select distinct WellName from [dimensions].[wells] WITH (NOLOCK) where [Route] = \'' + route + '\' and [Area] = \'' + area + '\''
return query
def ScenarioQuery(scenario, corpID, start_date, end_date):
start_date = StringifyDates(start_date)
end_date = StringifyDates(end_date)
if corpID[0] != 'ALL':
in_clause = FormulateInClause(corpID, 'CORPORATE_ID')
start_date = StringifyDates(start_date)
end_date = StringifyDates(end_date)
query = 'select S349, S370 as OilProduction, S376, S371 as GasProduction, C370, C371, C376, C753, C754, API10, API12, API14, WFLAC8 as WellFlac, wflac6, OUTDATE as Date, CORPORATE_ID as CorpID, m.LastUpdated'\
' from aries.ac_monthly m WITH (NOLOCK)' \
' left join aries.AC_PROPERTY p WITH (NOLOCK)'\
' on m.PROPNUM = p.PROPNUM '\
' left join aries.AC_ONELINE o WITH (NOLOCK)'\
' on m.scenario = o.scenario and m.PROPNUM = o.PROPNUM' + in_clause + ''\
' and m.scenario = \'' + scenario + '\' and OUTDATE >= \'' + start_date + '\' and OUTDATE <= \'' + end_date + '\''
else:
query = 'select S349, S370, S376, S371, C370 as OilProduction, C371 as GasProduction, C376, C753, C754, API10, API12, API14, WFLAC8 as WellFlac, wflac6, OUTDATE as Date, CORPORATE_ID as CorpID, m.LastUpdated'\
' from aries.ac_monthly m WITH (NOLOCK)' \
' left join aries.AC_PROPERTY p WITH (NOLOCK)'\
' on m.PROPNUM = p.PROPNUM'\
' left join aries.AC_ONELINE o WITH (NOLOCK)'\
' on m.scenario = o.scenario and m.PROPNUM = o.PROPNUM'\
' where m.scenario = \'' + scenario + '\' and OUTDATE >= \'' + start_date + '\' and OUTDATE <= \'' + end_date + '\''
return query
def EDWKeyQueryFromCorpID(corpid_list, Area):
in_clause = FormulateInClause(corpid_list, 'CorpID')
query = 'select WellName, CorpID, API'\
' from [dimensions].[wells] WITH (NOLOCK) ' + in_clause
if Area:
query = query + ' and Area = \'' + Area + '\''
return query
def EDWKeyQueryFromWellName(wellname_list):
in_clause = FormulateInClause(wellname_list, 'WellName')
query = 'select WellName, CorpID, API'\
' from [dimensions].[wells] WITH (NOLOCK) ' + in_clause
return query
def EDWKeyQueryFromWellFlac(wellflac_list):
in_clause = FormulateInClause(wellflac_list, 'Wellflac')
query = 'select WellName, CorpID, Wellflac, API'\
' from [dimensions].[wells] WITH (NOLOCK) ' + in_clause
return query
def EDWKeyQueryFromAPI(API_list):
in_clause = FormulateInClause(API_list, 'API')
query = 'select WellName, CorpID, API'\
' from [dimensions].[wells] WITH (NOLOCK) ' + in_clause
return query
def GetActualsFromDB(corpid_list, start_date, end_date):
in_clause = FormulateInClause(corpid_list, 'CorpID')
query = 'select Oil'\
', Gas'\
', BOE'\
', Water'\
', MeasuredOil'\
', MeasuredGas'\
', MeasuredWater'\
', DateKey as Date_Key'\
', CorpID'\
' from [facts].[production] p WITH (NOLOCK) '\
' join [dimensions].[wells] w WITH (NOLOCK) '\
' on p.Wellkey = w.Wellkey ' + in_clause + ''\
' and DateKey >= ' + start_date + ' and DateKey <= ' + end_date
return query
def GetGFOFromEastDB2019(WellName_FieldName, start_date, end_date):
if WellName_FieldName[0] != 'ALL':
in_clause = FormulateInClause(WellName_FieldName, 'i.[WellName_FieldName]')
else:
in_clause = ''
query = 'select distinct gfo.[WellName_FieldName] as WellName'\
', [2019Zmcfd]'\
', [2019ZNF] as NettingFactor'\
', [Date]'\
' from [TeamOptimizationEngineering].[dbo].[GFOEast2019PlanTable] gfo WITH (NOLOCK)'\
' join [TeamOptimizationEngineering].[dbo].[GFOEastInputTable] i WITH (NOLOCK)'\
' on gfo.[WellName_FieldName] = i.[WellName_FieldName] ' + in_clause + ' and [Date] >= \'' + start_date + '\' and [Date] <= \'' + end_date + '\''
return query
def GetGFOFromEastDB2018(WellName_FieldName, start_date, end_date):
if WellName_FieldName[0] != 'ALL':
in_clause = FormulateInClause(WellName_FieldName, 'i.[WellName_FieldName]')
else:
in_clause = ''
query = 'select distinct gfo.[WellName_FieldName]'\
', [2018Zmcfd]'\
', [2018ZNF] as NettingFactor'\
', [Date]'\
' from [TeamOptimizationEngineering].[dbo].[GFOEast2018PlanTable] gfo WITH (NOLOCK)'\
' join [TeamOptimizationEngineering].[dbo].[GFOEastInputTable] i WITH (NOLOCK)'\
' on gfo.[WellName_FieldName] = i.[WellName_FieldName] ' + in_clause + ' and [Date] >= \'' + start_date + '\' and [Date] <= \'' + end_date + '\''
return query
def GetNettingFactorsfromDB(wellname_list):
query = 'select WellName, Wellflac, NF, NRI, FirstSalesDateInput '\
'from [TeamOptimizationEngineering].[dbo].[GFOEastInputTable] ' \
if wellname_list:
in_clause = FormulateInClause(wellname_list, 'WellName')
query = query + ' ' + in_clause
return query
def FormulateInClause(item_list, column_name):
in_clause = 'where ' + str(column_name) + ' in ('
count = 1
for item in item_list:
if count == len(item_list) and item:
in_clause = in_clause + '\'' + str(item) + '\')'
elif item:
in_clause = in_clause + '\'' + str(item) + '\', '
count = count + 1
return in_clause
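# Illustrative example (assumes a non-empty item list; an empty list would leave the
# clause unterminated):
#   FormulateInClause(['12345', '67890'], 'CorpID')
#   -> "where CorpID in ('12345', '67890')"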
def ColumnQuery(table_name):
query = 'select column_name from information_schema.columns where table_name = \'' + table_name + '\''
return query
def RouteQuery(route_list):
in_clause = FormulateInClause(route_list, 'route')
query = 'select distinct name, apiNumber, wellflac from enbase.asset ' + in_clause + ' and apiNumber is not null and wellFlac is not null'
return query
def FirstProductionDateQuery(corpid_list):
in_clause = FormulateInClause(corpid_list, 'CorpID')
query = 'select top 1 FirstProductionDate from Dimensions.Wells WITH (NOLOCK) ' + in_clause + ' and FirstProductionDate is not null order by FirstProductionDate Asc'
return query
def GetActenumDrillScheduleData(start_date, end_date):
import datetime
query = ' select S_API as API,' \
' S_Name as WellName,' \
' N_LateralLength as LateralLength,' \
' N_ExpectedStages as ExpectedStages,' \
' T_StartFracWell as StartFracDate,' \
' T_FinishFracWell as EndFracDate,' \
' N_SurfaceLatitude as SurfaceLatitude,' \
' N_SurfaceLongitude as SurfaceLongitude,' \
' N_BottomHoleLatitude as BottomHoleLatitude,' \
' N_BottomHoleLongitude as BottomHoleLongitude,' \
' framework_valid_from as UpdateDate' \
' from [bpx_actenum].[wells.asis] a' \
' where T_StartFracWell > \'' + start_date + '\' and T_StartFracWell < \'' + end_date + '\'' \
' and N_SurfaceLatitude is not null' \
' and N_SurfaceLongitude is not null' \
' and N_BottomHoleLatitude is not null' \
' and N_BottomHoleLongitude is not null' \
' and DELETED = 0' \
' and S_API is not null' \
' and S_API <> \'\''
return query
def GetWellsWithinBearing(lat, lon, distance):
str_lat = str(lat)
str_long = str(lon)
str_dist = str(distance)
query = 'select [WELL_NAME]'\
', [UWI]'\
', [SURFACE_LATITUDE] as SurfaceLatitude'\
', [SURFACE_LONGITUDE] as SurfaceLongitude'\
', [BOTTOM_HOLE_LATITUDE] as BottomHoleLatitude'\
', [BOTTOM_HOLE_LONGITUDE] as BottomHoleLongitude'\
', e_var.DISTANCE'\
' FROM [bpx_tdm].[WELL.asis]'\
' cross apply (select (RADIANS(([SURFACE_LONGITUDE]) - ('+str_long+'))) as dlon) as a_var'\
' cross apply (select (RADIANS(([SURFACE_LATITUDE]) - ('+str_lat+'))) as dlat) as b_var'\
' cross apply (select (POWER(sin(dlat/2), 2) + cos(RADIANS('+str_lat+')) * cos(RADIANS([SURFACE_LATITUDE])) * POWER(sin(dlon/2), 2)) as a) as c_var'\
' cross apply (select (2 * atn2( sqrt(a), sqrt(1-a) )) as c) as d_var'\
' cross apply (select (3958.8 * 5280 * c) as DISTANCE) as e_var'\
' where e_var.DISTANCE < ' + str_dist + ''\
' and [SURFACE_LATITUDE] is not null'\
' and [SURFACE_LONGITUDE] is not null'\
' and [BOTTOM_HOLE_LATITUDE] is not null'\
' and [BOTTOM_HOLE_LATITUDE] is not null'
return query
def StringifyDates(date):
import pandas as pd
if not isinstance(date, str):
        date = pd.to_datetime(date)
        # assumed ISO-style output; adjust the pattern if the target database expects another format
        date = date.strftime('%Y-%m-%d')
    return date
import torch
import pathlib
import pandas as pd
import pytorch_lightning as pl
from datetime import datetime
from collections import OrderedDict
class CSVLogger(pl.Callback):
"""Custom metric logger and model checkpoint."""
def __init__(self, output_path=None):
super(CSVLogger, self).__init__()
self._epoch = None
if output_path is None:
self.logger_path = None
else:
self.logger_path = pathlib.Path(output_path)
self.logger_path.mkdir(parents=True, exist_ok=True)
def metrics(self, interval):
if interval == 'epoch':
return self.epoch_metrics
elif interval in ['step', 'batch']:
return self.batch_metrics
@property
def batch_metrics(self):
metrics_path = self.logger_path / 'metrics_batch.csv'
return pd.read_csv(metrics_path)
@property
def epoch_metrics(self):
metrics_path = self.logger_path / 'metrics_epoch.csv'
return pd.read_csv(metrics_path)
def _extract_metrics(self, trainer, interval):
metrics = trainer.callback_metrics
metric_keys = list(metrics.keys())
data_dict = OrderedDict()
if interval == 'epoch':
metric_keys.remove('epoch')
data_dict['epoch'] = metrics['epoch']
data_dict['time'] = str(datetime.now())
elif interval in ['step', 'batch']:
remove_list = ['train', 'val', 'epoch']
for m in metrics.keys():
if any(sub in m for sub in remove_list):
metric_keys.remove(m)
data_dict[interval] = trainer.global_step
for k in metric_keys:
if isinstance(metrics[k], dict):
for j in metrics[k].keys():
data_dict[j] = metrics[k][j]
else:
data_dict[k] = metrics[k]
# cleanup
for k in data_dict.keys():
try:
data_dict[k] = float(data_dict[k].cpu())
except Exception:
pass
return data_dict
def _log_csv(self, trainer, metrics_path, interval):
data_dict = self._extract_metrics(trainer, interval)
new_metrics = pd.DataFrame.from_records([data_dict], index=interval)
if metrics_path.exists():
config = dict(header=False, mode='a')
old_metrics = self.metrics(interval).set_index(interval)
if not new_metrics.columns.equals(old_metrics.columns):
new_metrics = pd.concat([old_metrics, new_metrics])
config = dict(header=True, mode='w')
else:
config = dict(header=True, mode='w')
new_metrics.to_csv(metrics_path, **config)
def on_init_start(self, trainer):
"""Called when the trainer initialization begins, model has not yet been set."""
pass
def on_init_end(self, trainer):
"""Called when the trainer initialization ends, model has not yet been set."""
if self.logger_path is None:
checkpoint_path = trainer.checkpoint_callback.dirpath
# checkpoint_path = trainer.logger.log_dir
self.logger_path = checkpoint_path.parent / 'logging'
self.logger_path.mkdir(parents=True, exist_ok=True)
def on_batch_end(self, trainer, pl_module):
"""Called when the training batch ends."""
if trainer.global_step > 1:
metrics_path = self.logger_path / 'metrics_batch.csv'
self._log_csv(trainer, metrics_path, interval='batch')
def on_epoch_end(self, trainer, pl_module):
"""Called when the epoch ends."""
metrics_path = self.logger_path / 'metrics_epoch.csv'
self._log_csv(trainer, metrics_path, interval='epoch')
def on_sanity_check_start(self, trainer, pl_module):
"""Called when the validation sanity check starts."""
pass
def on_sanity_check_end(self, trainer, pl_module):
"""Called when the validation sanity check ends."""
pass
def on_epoch_start(self, trainer, pl_module):
"""Called when the epoch begins."""
pass
def on_batch_start(self, trainer, pl_module):
"""Called when the training batch begins."""
pass
def on_validation_batch_start(self, trainer, pl_module):
"""Called when the validation batch begins."""
pass
def on_validation_batch_end(self, trainer, pl_module):
"""Called when the validation batch ends."""
pass
def on_test_batch_start(self, trainer, pl_module):
"""Called when the test batch begins."""
pass
def on_test_batch_end(self, trainer, pl_module):
"""Called when the test batch ends."""
pass
def on_train_start(self, trainer, pl_module):
"""Called when the train begins."""
pass
def on_train_end(self, trainer, pl_module):
"""Called when the train ends."""
pass
def on_validation_start(self, trainer, pl_module):
"""Called when the validation loop begins."""
pass
def on_validation_end(self, trainer, pl_module):
"""Called when the validation loop ends."""
pass
def on_test_start(self, trainer, pl_module):
"""Called when the test begins."""
pass
def on_test_end(self, trainer, pl_module):
"""Called when the test ends."""
pass
class PandasLogger(pl.Callback):
"""PandasLogger metric logger and model checkpoint."""
def __init__(self, save_path=None):
super(PandasLogger, self).__init__()
self.batch_metrics = pd.DataFrame()
self.epoch_metrics = pd.DataFrame()
self._epoch = 0
def _extract_metrics(self, trainer, interval):
metrics = trainer.callback_metrics
metric_keys = list(metrics.keys())
data_dict = OrderedDict()
# setup required metrics depending on interval
if interval == 'epoch':
if interval in metric_keys:
metric_keys.remove('epoch')
data_dict['epoch'] = metrics['epoch']
else:
data_dict['epoch'] = self._epoch
data_dict['time'] = str(datetime.now())
self._epoch += 1
elif interval in ['step', 'batch']:
remove_list = ['train', 'val', 'epoch']
for m in metrics.keys():
if any(sub in m for sub in remove_list):
metric_keys.remove(m)
data_dict[interval] = trainer.global_step
# populate ordered dictionary
for k in metric_keys:
if isinstance(metrics[k], dict):
continue
else:
data_dict[k] = float(metrics[k])
# dataframe with a single row (one interval)
metrics = pd.DataFrame.from_records([data_dict], index=interval)
return metrics
def on_batch_end(self, trainer, pl_module):
"""Called when the training batch ends."""
if trainer.global_step > 0:
new_metrics = self._extract_metrics(trainer, 'batch')
self.batch_metrics = pd.concat([self.batch_metrics, new_metrics])
def on_epoch_end(self, trainer, pl_module):
"""Called when the epoch ends."""
new_metrics = self._extract_metrics(trainer, 'epoch')
self.epoch_metrics = | pd.concat([self.epoch_metrics, new_metrics]) | pandas.concat |
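# Minimal usage sketch (illustrative; `MyModel` and `train_loader` are assumed to be a
# LightningModule and a DataLoader defined elsewhere). Either callback can be attached:
#
#   pandas_logger = PandasLogger()
#   trainer = pl.Trainer(max_epochs=5, callbacks=[CSVLogger('logs'), pandas_logger])
#   trainer.fit(MyModel(), train_loader)
#   print(pandas_logger.epoch_metrics.tail())  # in-memory DataFrame of per-epoch metrics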
import os
import re
from os import path
import numpy as np
import pandas as pd
from scipy.stats import norm
data_dir = path.abspath(path.join(path.dirname(__file__), "..", "data"))
def _shift_turbine_curve(turbine_curve, hub_height, maxspd, new_curve_res):
"""Shift a turbine curve based on a given hub height.
:param pandas.Series turbine_curve: power curve data, wind speed index.
:param float hub_height: height to shift power curve to.
:param float maxspd: Extent of new curve (m/s).
:param float new_curve_res: Resolution of new curve (m/s).
"""
wspd_height_base = 262.467 # 80m in feet
wspd_exp = 0.15 # wspd(h) = wspd_0 * (h / h0)**wspd_exp
curve_x = np.arange(0, maxspd + new_curve_res, new_curve_res)
wspd_scale_factor = (wspd_height_base / hub_height) ** wspd_exp
shifted_x = turbine_curve.index * wspd_scale_factor
shifted_curve = np.interp(curve_x, shifted_x, turbine_curve, left=0, right=0)
shifted_curve = pd.Series(data=shifted_curve, index=curve_x)
shifted_curve.index.name = "Speed bin (m/s)"
return shifted_curve
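# For example, with a 100 m (~328.1 ft) hub the scale factor is
# (262.467 / 328.084) ** 0.15 ~= 0.967, so the curve's speed axis is compressed by
# roughly 3.3% before re-interpolation onto the new speed bins.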
def build_state_curves(form_860, power_curves, maxspd=30, default="IEC class 2", rsd=0):
"""Parse Form 860 and turbine curves to obtain average state curves.
:param pandas.DataFrame form_860: EIA Form 860 data.
:param pandas.DataFrame power_curves: turbine power curves.
:param float maxspd: maximum x value for state curves.
:param str default: turbine curve name for turbines not in power_curves.
:param float rsd: relative standard deviation for spatiotemporal smoothing.
:return: (*pandas.DataFrame*) - DataFrame of state curves.
"""
print("building state_power_curves")
mfg_col = "Predominant Turbine Manufacturer"
model_col = "Predominant Turbine Model Number"
capacity_col = "Nameplate Capacity (MW)"
hubheight_col = "Turbine Hub Height (Feet)"
new_curve_res = 0.01 # resolution: m/s
states = form_860["State"].unique()
curve_x = np.arange(0, maxspd + new_curve_res, new_curve_res)
state_curves = pd.DataFrame(curve_x, columns=["Speed bin (m/s)"])
for s in states:
cumulative_curve = np.zeros_like(curve_x)
cumulative_capacity = 0
state_wind_farms = form_860[form_860["State"] == s]
for i, f in enumerate(state_wind_farms.index):
# Look up attributes from Form 860
farm_capacity = state_wind_farms[capacity_col].iloc[i]
hub_height = state_wind_farms[hubheight_col].iloc[i]
turbine_mfg = state_wind_farms[mfg_col].iloc[i]
turbine_model = state_wind_farms[model_col].iloc[i]
# Look up turbine-specific power curve (or default)
turbine_name = " ".join([turbine_mfg, turbine_model])
if turbine_name not in power_curves.columns:
turbine_name = default
turbine_curve = power_curves[turbine_name]
# Shift based on farm-specific hub height
shifted_curve = _shift_turbine_curve(
turbine_curve, hub_height, maxspd, new_curve_res
)
# Add to cumulative totals
cumulative_curve += shifted_curve.to_numpy() * farm_capacity
cumulative_capacity += farm_capacity
# Normalize based on cumulative capacity
state_curves[s] = cumulative_curve / cumulative_capacity
state_curves.set_index("Speed bin (m/s)", inplace=True)
# Add an 'Offshore' state with a representative curve
hub_height = 393.701 # 120 meters, in feet to match Form860 data
turbine_curve = power_curves["Vestas V164-8.0"]
shifted_curve = _shift_turbine_curve(
turbine_curve, hub_height, maxspd, new_curve_res
)
state_curves["Offshore"] = shifted_curve.to_numpy()
offshore_rsd = 0.25
if rsd > 0:
smoothed_state_curves = pd.DataFrame(
index=state_curves.index, columns=state_curves.columns
)
for s in state_curves.columns:
xs = state_curves.index
ys = np.zeros_like(xs)
for i, x in enumerate(xs):
if x == 0:
continue
if s == "Offshore":
sd = max(1.5, offshore_rsd * x)
else:
sd = max(1.5, rsd * x)
min_point = x - 3 * sd
max_point = x + 3 * sd
sample_points = np.logical_and(xs > min_point, xs < max_point)
cdf_points = norm.cdf(xs[sample_points], loc=x, scale=sd)
pdf_points = np.concatenate((np.zeros(1), np.diff(cdf_points)))
ys[i] = np.dot(pdf_points, state_curves[s][sample_points])
smoothed_state_curves[s] = ys
state_curves = smoothed_state_curves
return state_curves
def get_form_860(data_dir, year=2016):
"""Read data for EIA Form 860.
:param str data_dir: data directory.
:param int year: EIA data year to get.
:return: (*pandas.DataFrame*) -- dataframe with Form 860 data.
"""
if not isinstance(data_dir, str):
raise TypeError("data_dir is not a str")
if not path.isdir(data_dir):
raise ValueError("data_dir is not a valid directory")
if not isinstance(year, int):
raise TypeError("year is not an int")
regex_str = r"3_2_Wind_Y(\d{4}).csv"
valid_years = [
int(re.match(regex_str, f).group(1))
for f in os.listdir(data_dir)
if re.match(regex_str, f)
]
if year not in valid_years:
err_msg = "form data for year {year} not found. ".format(year=year)
        err_msg += "Years with data: " + ", ".join(str(y) for y in valid_years)
raise ValueError(err_msg)
form_860_filename = "3_2_Wind_Y{year}.csv".format(year=year)
form_860_path = path.join(data_dir, form_860_filename)
    form_860 = pd.read_csv(form_860_path, skiprows=1)
    return form_860
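# Illustrative usage (power_curves is assumed to be a DataFrame of turbine power
# curves indexed by wind speed bin, loaded elsewhere):
#   form_860 = get_form_860(data_dir, year=2016)
#   state_curves = build_state_curves(form_860, power_curves)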
#----------------------------------------------------------
#importing Neccessary libraries
import pandas as pd
import os.path
from os import path
from datetime import date
#----------------------------------------------------------
#Important functions
def enter_record():
n='y'
while n=='y':
s=str(date.today())
k=int(input('enter cost '))
data=[s,k]
        df = pd.DataFrame({"date": data[0], "value": data[1]}, index=[0])
        # Assumed continuation (the source is truncated here): append the record to a
        # CSV and ask whether to continue; the file name 'records.csv' is a guess.
        df.to_csv('records.csv', mode='a', header=not path.exists('records.csv'), index=False)
        n = input('add another record? (y/n) ')
import streamlit as st
import pandas as pd
import altair as alt
def clean_summary_data(file_str:str, name:str):
input_df = pd.read_csv(
file_str,
names=['1', '2','3','type','ministry','source','amount'],
thousands=',')
input_df[['amount']] = input_df[['amount']].fillna(value='EMPTY')
input_df = (input_df.fillna(method='ffill') # populate columns with previous value
.drop(['1','2'], axis=1) # drop unused columns
.drop([0,1,2,3,]) # drop first ununsed rows
)
# remove other income / expenses
input_df.drop(input_df[input_df['3'] == 'Other Expense'].index, inplace = True)
input_df.drop(input_df[input_df['3'] == 'Other Income'].index, inplace = True)
input_df.drop(input_df[input_df['3'] == 'Total Other Income'].index, inplace = True)
input_df.drop(input_df[input_df['3'] == 'Total Other Expense'].index, inplace = True)
input_df = input_df.drop(['3'], axis=1)
# exclude Transfer from Invested Funds
# input_df.drop(input_df[input_df['source'] == 'Transfer from Invested Funds'].index, inplace = True)
# input_df.drop(input_df[input_df['source'] == 'Endowment Fund earnings'].index, inplace = True)
# input_df.drop(input_df[input_df['source'] == 'Endowment Fund earnings'].index, inplace = True)
# remove summary income fields
# input_df.drop(input_df[input_df['ministry'] == 'Transfer from Invested Funds'].index, inplace = True)
input_df.drop(input_df[input_df['ministry'].str.startswith('Total')].index, inplace = True)
input_df.drop(input_df[input_df['type'].str.startswith('Total')].index, inplace = True)
input_df.drop(input_df[input_df['amount'] == 'EMPTY'].index, inplace = True)
# set all amount types to float
input_df['amount'] = input_df['amount'].astype(float)
# input_df = input_df.set_index(['type','ministry', 'source'])
input_df.set_index(['type','ministry','source'])
input_df = input_df.rename(columns={'amount': name})
# Caputre Guest Pastors
guest_pastors_loc = input_df.index[input_df['ministry'] == 'Guest Pastors'].tolist()
if guest_pastors_loc:
input_df.loc[guest_pastors_loc[0], 'source'] = 'Guest Pastors'
input_df.loc[guest_pastors_loc[0], 'ministry'] = 'Pastoral Ministry'
# Capture Severance Pay
severance_loc = input_df.index[input_df['ministry'] == 'Severance Pay'].tolist()
if severance_loc:
input_df.loc[severance_loc[0], 'source'] = 'Severance Pay'
input_df.loc[severance_loc[0], 'ministry'] = 'Pastoral Ministry'
# print(input_df)
return input_df
# @st.cache
def get_UN_data():
data_2018 = clean_summary_data('./data/2018-summary.csv', '2018')
data_2019 = clean_summary_data('./data/2019-summary.csv', '2019')
data_2020 = clean_summary_data('./data/2020-summary.csv', '2020')
# data_2018.join(data_2019, lsuffix='2018')
# data_2018.join(data_2019, lsuffix='2018')
data = pd.merge(data_2018, data_2019, how='outer')
data = pd.merge(data, data_2020, how='outer')
pastoral_ministry = data[data['ministry'] == 'Pastoral Ministry'].groupby('ministry').agg('sum')
admin = data[data['ministry'] == 'Administrative Support']
admin = admin[admin['source'].isin([
'Church Secretary Salary',
'FICA Tax',
'Church Secretary Retirement',
'Medical Insurance'
])].groupby('ministry').agg('sum')
custodian = data[data['ministry'] == 'Facility Support']
custodian = custodian[custodian['source'].isin([
'Custodian Salary',
'FICA Tax',
'Medical Insurance',
'Custodian Retirement'
])].groupby('ministry').agg('sum')
    data = pd.concat([pastoral_ministry, admin, custodian])
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
class TestSample:
@pytest.fixture(params=[Series, DataFrame])
def obj(self, request):
klass = request.param
if klass is Series:
arr = np.random.randn(10)
else:
arr = np.random.randn(10, 10)
return klass(arr, dtype=None)
@pytest.mark.parametrize("test", list(range(10)))
def test_sample(self, test, obj):
# Fixes issue: 2419
# Check behavior of random_state argument
# Check for stability when receives seed or random state -- run 10
# times.
seed = np.random.randint(0, 100)
tm.assert_equal(
obj.sample(n=4, random_state=seed), obj.sample(n=4, random_state=seed)
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=seed),
obj.sample(frac=0.7, random_state=seed),
)
tm.assert_equal(
obj.sample(n=4, random_state=np.random.RandomState(test)),
obj.sample(n=4, random_state=np.random.RandomState(test)),
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
)
tm.assert_equal(
obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
)
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(obj.sample(n=4))
os2.append(obj.sample(frac=0.7))
tm.assert_equal(*os1)
tm.assert_equal(*os2)
def test_sample_lengths(self, obj):
# Check lengths are right
        assert len(obj.sample(n=4)) == 4
        assert len(obj.sample(frac=0.34)) == 3
        assert len(obj.sample(frac=0.36)) == 4
def test_sample_invalid_random_state(self, obj):
# Check for error when random_state argument invalid.
msg = (
"random_state must be an integer, array-like, a BitGenerator, Generator, "
"a numpy RandomState, or None"
)
with pytest.raises(ValueError, match=msg):
obj.sample(random_state="a_string")
def test_sample_wont_accept_n_and_frac(self, obj):
# Giving both frac and N throws error
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, frac=0.3)
def test_sample_requires_positive_n_frac(self, obj):
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `n` >= 0",
):
obj.sample(n=-3)
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `frac` >= 0",
):
obj.sample(frac=-0.3)
def test_sample_requires_integer_n(self, obj):
# Make sure float values of `n` give error
with pytest.raises(ValueError, match="Only integers accepted as `n` values"):
obj.sample(n=3.2)
def test_sample_invalid_weight_lengths(self, obj):
# Weight length must be right
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError, match=msg):
bad_weights = [0.5] * 11
obj.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError, match="Fewer non-zero entries in p than size"):
bad_weight_series = Series([0, 0, 0.2])
obj.sample(n=4, weights=bad_weight_series)
def test_sample_negative_weights(self, obj):
# Check won't accept negative weights
bad_weights = [-0.1] * 10
msg = "weight vector many not include negative values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=bad_weights)
def test_sample_inf_weights(self, obj):
# Check inf and -inf throw errors:
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
msg = "weight vector may not include `inf` values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_inf)
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_ninf)
def test_sample_zero_weights(self, obj):
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=zero_weights)
def test_sample_missing_weights(self, obj):
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=nan_weights)
def test_sample_none_weights(self, obj):
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
tm.assert_equal(
obj.sample(n=1, axis=0, weights=weights_with_None), obj.iloc[5:6]
)
@pytest.mark.parametrize(
"func_str,arg",
[
("np.array", [2, 3, 1, 0]),
("np.random.MT19937", 3),
("np.random.PCG64", 11),
],
)
def test_sample_random_state(self, func_str, arg, frame_or_series):
# GH#32503
obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
if frame_or_series is Series:
obj = obj["col1"]
result = obj.sample(n=3, random_state=eval(func_str)(arg))
expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_equal(result, expected)
def test_sample_generator(self, frame_or_series):
# GH#38100
obj = frame_or_series(np.arange(100))
rng = np.random.default_rng()
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=rng)
result2 = obj.sample(n=50, random_state=rng)
assert not (result1.index.values == result2.index.values).all()
# Matching generator initialization must give same result
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=np.random.default_rng(11))
result2 = obj.sample(n=50, random_state=np.random.default_rng(11))
tm.assert_equal(result1, result2)
def test_sample_upsampling_without_replacement(self, frame_or_series):
# GH#27451
obj = DataFrame({"A": list("abc")})
if frame_or_series is Series:
obj = obj["A"]
msg = (
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
with pytest.raises(ValueError, match=msg):
obj.sample(frac=2, replace=False)
class TestSampleDataFrame:
# Tests which are relevant only for DataFrame, so these are
# as fully parametrized as they can get.
def test_sample(self):
# GH#2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = DataFrame(
{
"col1": range(10, 20),
"col2": range(20, 30),
"colString": ["a"] * 10,
"easyweights": easy_weight_list,
}
)
sample1 = df.sample(n=1, weights="easyweights")
tm.assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series or
# DataFrame with axis = 1.
ser = Series(range(10))
msg = "Strings cannot be passed as weights when sampling from a Series."
with pytest.raises(ValueError, match=msg):
ser.sample(n=3, weights="weight_column")
msg = (
"Strings can only be passed to weights when sampling from rows on a "
"DataFrame"
)
with pytest.raises(ValueError, match=msg):
df.sample(n=1, weights="weight_column", axis=1)
# Check weighting key error
with pytest.raises(
KeyError, match="'String passed to weights not a valid column'"
):
df.sample(n=3, weights="not_a_real_column_name")
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = DataFrame({"col1": range(10), "col2": ["a"] * 10})
second_column_weight = [0, 1]
tm.assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
)
# Different axis arg types
tm.assert_frame_equal(
df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
)
weight = [0] * 10
weight[5] = 0.5
tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
tm.assert_frame_equal(
df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
)
# Check out of range axis values
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis=2)
msg = "No axis named not_a_name for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis="not_a_name")
ser = Series(range(10))
with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
ser.sample(n=1, axis=1)
# Test weight length compared to correct axis
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis=1, weights=[0.5] * 10)
def test_sample_axis1(self):
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = DataFrame(
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
tm.assert_frame_equal(sample1, df[["colString"]])
# Test default axes
tm.assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
)
def test_sample_aligns_weights_with_frame(self):
# Test that function aligns weights with frame
df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
ser = Series([1, 0, 0], index=[3, 5, 9])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser))
# Weights have index values to be dropped because not in
# sampled DataFrame
ser2 = Series([0.001, 0, 10000], index=[3, 5, 10])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser2))
# Weights have empty values to be filed with zeros
ser3 = Series([0.01, 0], index=[3, 5])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser3))
# No overlap in weight and sampled DataFrame indices
ser4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
df.sample(1, weights=ser4)
def test_sample_is_copy(self):
# GH#27357, GH#30784: ensure the result of sample is an actual copy and
# doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
df2 = df.sample(3)
with tm.assert_produces_warning(None):
df2["d"] = 1
def test_sample_does_not_modify_weights(self):
# GH-42843
result = np.array([np.nan, 1, np.nan])
expected = result.copy()
ser = Series([1, 2, 3])
# Test numpy array weights won't be modified in place
ser.sample(weights=result)
tm.assert_numpy_array_equal(result, expected)
# Test DataFrame column won't be modified in place
df = DataFrame({"values": [1, 1, 1], "weights": [1, np.nan, np.nan]})
expected = df["weights"].copy()
df.sample(frac=1.0, replace=True, weights="weights")
result = df["weights"]
        tm.assert_series_equal(result, expected)
# coding: utf8
from collections import deque
from collections import Counter
# noinspection PyPackageRequirements
import pytest
from pandas import DataFrame
# noinspection PyProtectedMember
from dfqueue.core.dfqueue import QueuesHandler, QueueHandlerItem, QueueBehaviour
def test_singleton():
handler_a = QueuesHandler()
handler_b = QueuesHandler()
assert id(handler_a) != id(handler_b)
assert id(handler_a._QueuesHandler__instance) == id(handler_b._QueuesHandler__instance)
assert handler_a.default_queue_name == handler_b.default_queue_name
def test_valid_get_item():
handler = QueuesHandler()
default_queue_name = handler.default_queue_name
queue_data = handler[default_queue_name]
assert isinstance(queue_data, dict)
assert len(queue_data) == len(QueueHandlerItem)
assert all([item in queue_data for item in QueueHandlerItem])
assert isinstance(queue_data[QueueHandlerItem.QUEUE], deque)
assert queue_data[QueueHandlerItem.DATAFRAME] is None
assert isinstance(queue_data[QueueHandlerItem.MAX_SIZE], int)
def test_invalid_get_item():
handler = QueuesHandler()
invalid_queue_name = "UNKNOWN"
with pytest.raises(AssertionError):
handler[invalid_queue_name]
@pytest.mark.parametrize("queue_iterable,dataframe,max_size,counter,behaviour", [
    (deque(), DataFrame()
import numpy as np
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn import svm
from sklearn import linear_model
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import uniform
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.simplefilter(action='ignore', category=ConvergenceWarning)
my_data = pd.read_csv('training.csv')
test = pd.read_csv('testing.csv')
my_data = my_data.drop(['Name'], axis=1).drop(['PassengerId'], axis=1).drop(['Cabin'], axis=1).drop(['Ticket'], axis=1)
my_data["Sex"] = np.where(my_data['Sex'] == 'male', 0, 1)
my_data.loc[(my_data['Embarked'] == 'S', 'Embarked')] = 1
my_data.loc[(my_data['Embarked'] == 'C', 'Embarked')] = 2
my_data.loc[(my_data['Embarked'] == 'Q', 'Embarked')] = 3
my_data = my_data.fillna(np.nan)
# apply the same preprocessing to the held-out test set
test = test.drop(['Name'], axis=1).drop(['PassengerId'], axis=1).drop(['Cabin'], axis=1).drop(['Ticket'], axis=1)
test["Sex"] = np.where(test['Sex'] == 'male', 0, 1)
test.loc[(test['Embarked'] == 'S', 'Embarked')] = 1
test.loc[(test['Embarked'] == 'C', 'Embarked')] = 2
test.loc[(test['Embarked'] == 'Q', 'Embarked')] = 3
test = test.fillna(np.nan)
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
test = imp.fit_transform(test)
my_data = imp.fit_transform(my_data)
x = np.delete(my_data, 0, 1).astype(float)
y = [float(i[0]) for i in my_data]
a = test.astype(float)
# pd.option_context('display.max_rows', None, 'display.max_columns', None)
# np.set_printoptions(threshold=np.nan)# more options can be specified also
# f = open("stuff.txt", "w")
# print(my_data, file=f)
# Creating the hyperparameter grid
# Instantiating logistic regression classifier
param_grid = {'C': [0.01, 0.1, 1, 10, 100, 1000, 10000],
'gamma': [10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001],
'kernel': ['rbf', 'sigmoid']}
model = GridSearchCV(svm.SVC(), param_grid, cv=5, refit = True)
#model = svm.SVC(gamma='scale')
model.fit(x, y)
b = model.predict(a)
b = np.array(b)
out_df = pd.DataFrame(columns=['solution'], data=b)
import os
import sys
import multiprocessing as mp
import string
import platform
import shutil
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
import calendar
import pyemu
import flopy
# some global config for plotting
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
abet = string.ascii_uppercase
# some global config for path/directory structure
old_h_dir = os.path.join("..", "ver")
h_nam_file = "eaa_ver.nam"
h_dir = "history"
h_start_datetime = "1-1-2001"
h_end_datetime = "12-31-2015"
old_s_dir = os.path.join("..", "pred")
s_dir = "scenario"
s_nam_file = "eaa_pred.nam"
# history and scenarion simulation start datetimes
s_start_datetime = "1-1-1947"
s_end_datetime = "12-31-1958"
# files with history and scenario observation locations and states
h_hds_file = os.path.join("_data", "reformatted_head_obs.smp")
h_drn_file = os.path.join("_data", "springflow_obs.smp")
h_crd_file = os.path.join("_data", "head_obs.crd")
s_hds_file = os.path.join("_data", "pred_head_obs.smp")
s_drn_file = os.path.join("_data", "pred_springflow_obs.smp")
s_crd_file = os.path.join("_data", "pred_head_obs.crd")
# value of dry cells
hdry = -1.0e+20
# platform-specific binary information
exe_name = "mf2005"
ies_name = "pestpp-ies"
if "window" in platform.platform().lower():
bin_path = os.path.join("bin", "win")
exe_name = exe_name + ".exe"
ies_name = ies_name + ".exe"
elif "darwin" in platform.platform().lower():
bin_path = os.path.join("bin", "mac")
else:
bin_path = os.path.join("bin", "linux")
# the numeric IDs of J-17 and J-27
j17_id = 6837203
j27_id = 6950302
def _setup_model(old_dir, new_dir, start_datetime, nam_file, run=False):
"""load an existing model (either history or scenario) and configure it for
PEST interface construction
Args:
old_dir (str): directory location where the original model resides
new_dir (str): directory location where the new model files will be written
start_datetime (str): string rep of model starting datetime
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run the model once it is written to new_dir. Default is False
"""
# load the existing model and set some attributes
m = flopy.modflow.Modflow.load(nam_file, model_ws=old_dir, check=False,
verbose=True, forgive=False)
m.start_datetime = start_datetime
m.lpf.hdry = hdry
m.bas6.hnoflo = hdry
# change the workspace to new_dir
m.change_model_ws(new_dir, reset_external=True)
# set the external path so that arrays and lists are outside of the
# terrible MODFLOW file formats
m.external_path = "."
# write the inputs
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_dir, exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd=new_dir)
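# Illustrative driver calls using the module-level paths defined above:
#   _setup_model(old_h_dir, h_dir, h_start_datetime, h_nam_file, run=True)   # history
#   _setup_model(old_s_dir, s_dir, s_start_datetime, s_nam_file, run=True)   # scenario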
def _rectify_wel(model_ws, nam_file, run=True):
"""rectify the stress period WEL file entries so that every
stress period has the same entries (filling missing wells with
"dummy" entries with zero pumping)
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
run (bool): flag to run model once the WEL file has been rectified.
Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False,
verbose=True, forgive=False)
# get the current WEL file datasets
spd = m.wel.stress_period_data
df_dict = {}
all_kij = set()
# run thru all stress periods to get the union of well locations
for kper in range(m.nper):
ra = spd[kper]
df = pd.DataFrame.from_records(ra)
df.loc[:, "kij"] = df.apply(lambda x: (x.k, x.i, x.j), axis=1)
df.loc[:, "kij_str"] = df.kij.apply(lambda x: "{0:01.0f}_{1:03.0f}_{2:03.0f}".format(*x))
df.index = df.kij_str
all_kij.update(set(df.kij_str.tolist()))
print(kper)
df_dict[kper] = df
# work up fast-lookup containers for well location indices
new_index = list(all_kij)
new_k = {s: int(s.split('_')[0]) for s in new_index}
new_i = {s: int(s.split('_')[1]) for s in new_index}
new_j = {s: int(s.split('_')[2]) for s in new_index}
new_index.sort()
# process each stress period
new_spd = {}
for kper, df in df_dict.items():
# reindex with the full kij locations index
df = df.reindex(new_index)
# map the new kijs to the old kijs
for f, d in zip(["k", "i", "j"], [new_k, new_i, new_j]):
isna = df.loc[:, f].isna()
df.loc[isna, f] = [d[kij] for kij in df.loc[isna, :].index.values]
# fill the nans with 0.0
isna = df.flux.isna()
df.loc[isna, "flux"] = 0.0
# deal with the platform numpy int casting issue
if "window" in platform.platform():
df.loc[:, "i"] = df.i.astype(np.int32)
df.loc[:, "j"] = df.j.astype(np.int32)
df.loc[:, "k"] = df.k.astype(np.int32)
else:
df.loc[:, "i"] = df.i.astype(np.int)
df.loc[:, "j"] = df.j.astype(np.int)
df.loc[:, "k"] = df.k.astype(np.int)
spd[kper] = df.loc[:, ["k", "i", "j", "flux"]].to_records(index=False)
# create a new WEL package and replace the old one
flopy.modflow.ModflowWel(m, stress_period_data=spd, ipakcb=m.wel.ipakcb)
# write to a new model_ws with a "_wel" suffix
m.change_model_ws("{0}_wel".format(model_ws))
m.external_path = '.'
m.write_input()
# run?
if run:
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join("{0}_wel".format(model_ws), exe_name))
pyemu.os_utils.run("{0} {1}".format(exe_name, nam_file), cwd="{0}_wel".format(model_ws))
# just to make sure the model ran
new_lst = flopy.utils.MfListBudget(os.path.join("{0}_wel".format(model_ws), nam_file.replace(".nam", ".list")))
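# Illustrative call (writes the rectified model to a new "<model_ws>_wel" directory):
#   _rectify_wel(h_dir, h_nam_file)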
def build_rch_zone_array(model_ws, nam_file, plot=False):
"""build a recharge zone integer array for zone-based parameters
using unique values in the in recharge arrays
Args:
model_ws (str): model workspace
nam_file (str): MODFLOW-2005 nam file
plot (bool): flag to plot the zone array. Default is False
"""
m = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, load_only=["rch"], check=False,
                                   verbose=True, forgive=False)
arr = m.rch.rech[0].array
full_arr = m.rch.rech.array
mn = full_arr.mean(axis=0)[0, :, :]
mn_u, mn_c = np.unique(mn, return_counts=True)
zn_arr = np.zeros_like(arr, dtype=np.int)
for i, u_val in enumerate(mn_u):
# this contional makes sure we keep zeros as zero in the zone array
if u_val == 0.0:
continue
zn_arr[mn == u_val] = i
np.savetxt(os.path.join("_data", "rch_zn_arr.dat"), zn_arr, fmt="%3d")
if plot:
zn_arr = zn_arr.astype(np.float)
zn_arr[zn_arr == 0] = np.NaN
cb = plt.imshow(zn_arr)
plt.colorbar(cb)
plt.show()
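# Illustrative call (writes _data/rch_zn_arr.dat, which is read later by _setup_pst):
#   build_rch_zone_array(h_dir, h_nam_file, plot=False)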
def _setup_pst(org_model_ws, new_model_ws, nam_file):
"""construct the PEST interface, set parameter bounds and
generate the prior ensemble
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace/directory where the
PEST interface will be constructed
nam_file (str): MODFLOW-2005 nam file
"""
# make sure the model simulated heads file exists - need this for observations
if not os.path.exists(os.path.join(org_model_ws, nam_file.replace(".nam", ".hds"))):
raise Exception("need to call _setup_model()")
# load the model from org_model_ws
m= flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws,
load_only=["dis"], check=False,
verbose=True, forgive=False)
# load the recharge zone array
rch_zn_arr = np.loadtxt(os.path.join("_data", "rch_zn_arr.dat"), dtype=np.int)
# array-based model inputs to parameterize by layer (zero-based)
props = [["lpf.hk", 0], ["lpf.ss", 0], ["lpf.sy", 0], ["bas6.strt", 0]]
# copy to constant (global props)
const_props = props.copy()
# fill a zone-based array inputs container with recharge
# zone pars for each stress period
zone_props = []
zone_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# extend the global parameter container with recharge for each stress period
const_props.extend([["rch.rech", kper] for kper in range(m.nper)])
# include the final simulated groundwater level in every active
# model cell as an "observation" in PEST interface
hds_kperk = [[m.nper - 1, 0]]
# parameterize WEL flux and DRN cond spatially (one par for each entry)
spatial_bc_props = [["wel.flux", 0], ["drn.cond", 0]]
# parameterize WEL flux with a single global multiplier for ecah stress period
temporal_bc_props = [["wel.flux", kper] for kper in range(m.nper)]
#create the pest interface...
ph = pyemu.helpers.PstFromFlopyModel(nam_file, org_model_ws=org_model_ws, new_model_ws=new_model_ws,
grid_props=props,
hds_kperk=hds_kperk, zone_props=zone_props, hfb_pars=True,
remove_existing=True, build_prior=False, k_zone_dict={0: rch_zn_arr},
spatial_bc_props=spatial_bc_props, temporal_bc_props=temporal_bc_props,
model_exe_name=exe_name, pp_props=props, pp_space=30, const_props=const_props)
# set the parameter bounds to Edwards-based physically-plausible values
_set_par_bounds(ph.pst, nam_file)
# geostatistcal draws from the prior
pe = ph.draw(num_reals=300, use_specsim=True)
#add the control file initial values as a realization
pe.add_base()
# enforce parameter bounds on the ensemble
pe.enforce()
# save the ensemble to compressed (PEST extended binary) format
pe.to_binary(os.path.join(new_model_ws, "prior.jcb"))
# save the control file
ph.pst.write(os.path.join(new_model_ws, nam_file.replace(".nam", ".pst")))
# read the array parameter multiplier config file and set a hard upper bound
# on specific yield
df = pd.read_csv(os.path.join(new_model_ws, "arr_pars.csv"))
df.loc[:, "upper_bound"] = np.NaN
df.loc[:, "lower_bound"] = np.NaN
df.loc[df.org_file.apply(lambda x: "sy_" in x), "upper_bound"] = 0.25
df.to_csv(os.path.join(new_model_ws, "arr_pars.csv"))
# put the MODFLOW-2005 and PESTPP-IES binaries in the new_model_ws
shutil.copy2(os.path.join(bin_path, exe_name), os.path.join(new_model_ws, exe_name))
shutil.copy2(os.path.join(bin_path, ies_name), os.path.join(new_model_ws, ies_name))
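# Illustrative call (the new workspace name "template_history" is only an example):
#   _setup_pst(h_dir + "_wel", "template_history", h_nam_file)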
def _set_par_bounds(pst, nam_file):
"""set the parameter bounds to expert-knowledge-based
ranges
Args:
pst (pyemu.Pst): PEST control file instance
nam_file (str): MODFLOW-2005 nam file
"""
par = pst.parameter_data
# special case for WEL flux pars: more recent time has metering, so less uncertainty
names = par.loc[par.pargp.apply(lambda x: "welflux" in x), "parnme"]
if nam_file == h_nam_file:
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
else:
par.loc[names, "parlbnd"] = 0.7
par.loc[names, "parubnd"] = 1.3
# DRN conductance
names = par.loc[par.pargp.apply(lambda x: "drncond" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.5
par.loc[names, "parubnd"] = 1.5
# initial conditions
names = par.loc[par.pargp.apply(lambda x: "strt" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.9
par.loc[names, "parubnd"] = 1.1
# recharge
names = par.loc[par.pargp.apply(lambda x: "rech" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.8
par.loc[names, "parubnd"] = 1.2
# HK
names = par.loc[par.pargp.apply(lambda x: "hk" in x), "parnme"]
par.loc[names, "parlbnd"] = 0.01
par.loc[names, "parubnd"] = 100
def _add_smp_obs_to_pst(org_model_ws, new_model_ws, pst_name, nam_file, hds_crd_file):
"""add observations to the control file for the locations where groundwater levels
have been measured. The actual value of the observations will be set elsewhere
Args:
org_model_ws (str): original model workspace
new_model_ws (str): new model workspace
pst_name (str): PEST control file name
nam_file (str): MODFLOW-2005 nam file
hds_crd_file (str): PEST-style coordinate file that has been processed
to include k,i,j indices
"""
# make sure the control file exists
pst_name = os.path.join(new_model_ws, pst_name)
assert os.path.exists(pst_name)
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=new_model_ws,
load_only=["dis"], check=False,
forgive=False)
# load the control file
pst = pyemu.Pst(pst_name)
# load GW level location dataframe
crd_df = pd.read_csv(hds_crd_file + ".csv")
#load DRN location dataframe
drn_df = pd.read_csv(os.path.join("_data", "DRN_dict.csv"), delim_whitespace=True,
header=None, names=["name", "k", "i", "j"])
# build a dict of name-index location for DRN locations
kij_dict = {n: [0, i, j] for n, i, j in zip(drn_df.name, drn_df.i, drn_df.j)}
# the name of the DRN budget file
cbd_file = nam_file.replace(".nam", ".cbd")
# get one from the org model workspace and update the path to it
shutil.copy2(os.path.join(org_model_ws, cbd_file), os.path.join(new_model_ws, cbd_file))
cbd_file = os.path.join(new_model_ws, cbd_file)
# setup the forward run DRN budget post processor
prec = "double"
if "win" not in platform.platform().lower(): # not win or darwin
prec = "singl"
cbd_frun, cbd_df = pyemu.gw_utils.setup_hds_timeseries(cbd_file, kij_dict, prefix="drn",
include_path=True, fill=-1.0e+30,
text="drains", precision=prec,
model=m)
# make sure the new DRN instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(cbd_file)
assert os.path.exists(ins_file), ins_file
# add the new DRN observations to the control file
pst.add_observations(ins_file=ins_file, pst_path=".")
# set meaningful obs group names
pst.observation_data.loc[cbd_df.index, "obgnme"] = cbd_df.obgnme
# build a dict of name-index locations for the GW level observations locations
kij_dict = {n: [0, i, j] for n, i, j in zip(crd_df.name, crd_df.i, crd_df.j)}
# setup GW level post processor
hds_file = os.path.join(new_model_ws, nam_file.replace(".nam", ".hds"))
assert os.path.exists(hds_file)
hds_frun, hds_df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, prefix="hds",
include_path=True, fill=-1.0e+30, model=m)
# make sure the GW level instruction file exists
ins_file = "{0}_timeseries.processed.ins".format(hds_file)
assert os.path.exists(ins_file), ins_file
# add the GW level obs to the control file and set meaningful
# obs group names
pst.add_observations(ins_file=ins_file, pst_path=".")
pst.observation_data.loc[hds_df.index, "obgnme"] = hds_df.obgnme
# write the updated control file
pst.write(pst_name)
# add the post processor commands to the forward run script
frun_file = os.path.join(new_model_ws, "forward_run.py")
with open(frun_file, 'r') as f:
lines = f.readlines()
idx = None
for i, line in enumerate(lines):
if "__name__" in line:
idx = i
assert idx is not None
lines.insert(idx, " " + cbd_frun + '\n')
lines.insert(idx, " " + hds_frun + '\n')
with open(frun_file, 'w') as f:
for line in lines:
f.write(line)
def add_ij_to_hds_smp(crd_file):
"""intersect the GW level observation coordinates against the
model grid to get k,i,j index information
Args:
crd_file (str): PEST-style "bore coordinates" file
"""
from shapely.geometry import Point
# read the bore coord file
crd_df = pd.read_csv(crd_file, delim_whitespace=True, header=None, names=["name", "x", "y", "layer"])
# set a shapely point attribute
crd_df.loc[:, "pt"] = crd_df.apply(lambda x: Point(x.x, x.y), axis=1)
# load the history model
m = flopy.modflow.Modflow.load(h_nam_file, model_ws=h_dir,
load_only=["dis"], check=False,
forgive=False)
# use the flopy grid intersect functionality
gi = flopy.utils.GridIntersect(m.modelgrid)
crd_df.loc[:, 'ij'] = crd_df.pt.apply(lambda x: gi.intersect_point(x)[0][0])
# split out the i and j indices
crd_df.loc[:, 'i'] = crd_df.ij.apply(lambda x: x[0])
crd_df.loc[:, 'j'] = crd_df.ij.apply(lambda x: x[1])
# remove extra columns
crd_df.pop("ij")
crd_df.pop("pt")
# save the new dataframe to a CSV file
crd_df.to_csv(crd_file + ".csv")
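# Illustrative calls; the resulting "<crd_file>.csv" files are read later by
# _add_smp_obs_to_pst():
#   add_ij_to_hds_smp(h_crd_file)
#   add_ij_to_hds_smp(s_crd_file)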
def _set_obsvals(d, nam_file, hds_file, drn_file, pst_file, run=True):
"""samples the groundwater and spring discharge observations to
the model stress periods and sets the "obsval" attribute in the control
file. Also plots up org obs and sampled obs in a multipage pdf
Args:
d (str): directory where the control file exists
nam_file (str): MODFLOW-2005 nam file
hds_file (str): PEST-style site sample file with groundwater
level observations
drn_file (str): PEST-style site sample file with spring discharge
observations
pst_file (str): PEST control file
run (bool): flag to run PESTPP-IES with NOPTMAX=0 after the
observation values have been updated. Default is True.
"""
# load the model
m = flopy.modflow.Modflow.load(nam_file, model_ws=d, load_only=["dis"],
check=False, forgive=False)
# work out the stress period ending datetime
    sp_end_dts = pd.to_datetime(m.start_datetime)
import pandas as pd
import requests
from ratelimit import limits, sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.format(x)
response = requests.get(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entites (like last name only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.format(name)
response = requests.get(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from entity get request to LittleSis API, by name input rather than id
input as is required in original get request format, in JSON format. Resorts to entity with the highest number of relationships listed
for entries that point to multiple entites (like last name only entries).
Parameters
----------
name: Name of 1 individual or organization for which information is desired.
Example
-------
>>> entity('<NAME>'
{'meta': {'copyright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'updated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.format(name)
response = requests.get(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.format(ID)
response2 = requests.get(path2)
response2 = response2.json()
return response2
def relationships(name):
"""
Provides info from relationships get request to LittleSis API, by name input rather
than id input as is required in original get request format, in JSON format. Resorts to entity with the highest number of
relationships listed for entries that point to multiple entites (like last name only entries).
Parameters
----------
name: Name of 1 individual or organization for which information is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'copyright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.format(name)
response = requests.get(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.format(ID)
response2 = requests.get(path2)
response2 = response2.json()
return response2
@sleep_and_retry
@limits(calls=1, period=1)
def basic_entity(name):
"""
Creates pandas dataframe for one individual or entity with basic information from
entity get request to LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entites (like last name only entries).
Parameters
----------
name: Name of 1 information or entity for which information is desired.
Example
-------
>>> basic_table('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.format(name)
response = requests.get(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.format(ID)
response2 = requests.get(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
df = pd.DataFrame(list(data2.items()))
df.columns = ['info', 'value']
df = pd.pivot(df, columns = 'info', values = 'value')
df = df.fillna(method='bfill', axis=0)
df = df.iloc[:1, :]
df = df[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
df.rename(columns = {'start_date': 'date_of_birth'}, inplace = True)
return df
@sleep_and_retry
@limits(calls=1, period=1)
def list_entities(*args):
"""
Concatenates dataframes created by basic_table() for entity get requests to LittleSis
API, resulting in pandas dataframe of multiple rows. Resorts to entity with the highest number of relationships listed for entries
that point to multiple entites (like last name only entries).
Parameters
----------
*args: List of names of individuals or entities for which to include information in the resluting dataframe.
Example
-------
>>> list_table('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles Lakers—F 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_dfs = []
for name in args:
df = basic_entity(name)
list_of_dfs.append(df)
combined_df = pd.concat(list_of_dfs, ignore_index=True)
return combined_df
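# NOTE: the rate-limited id_to_name defined below overrides the simpler definition near
# the top of this module; it adds a status-code check and request throttling.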
@sleep_and_retry
@limits(calls=1, period=1)
def id_to_name(x):
path = 'https://littlesis.org/api/entities/{}'.format(x)
response = requests.get(path)
if response.status_code != 200:
raise Exception('API response: {}'.format(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
def relationships_df(name):
"""
Creates pandas dataframe with information from relationships get request to LittleSis
API.
Parameters
----------
name: Name of one individual or organization for which relationship information is
desired and included in the dataframe.
Example
-------
>>> relationships_df('<NAME>')
primary_entity related_entity amount currency \
0 Children’s Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.format(name)
response = requests.get(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.format(ID)
response2 = requests.get(path_for_relationships)
response2 = response2.json()
relationships = pd.DataFrame(response2['data'])
relationships = pd.DataFrame.to_dict(relationships)
blurbs = | pd.DataFrame(relationships['attributes']) | pandas.DataFrame |
# plot_helper.py (python3)
# utilities for graphic display of training and evaluation of CNNs
# experiments in knowledge documentation; with an application to AI for ethnobotany
# March 2020
#-------------------------------------------------------------------------------
import os, sys, glob
from pyt_utilities import *
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy
import pandas
pandas.set_option('display.max_columns', 50)
| pandas.set_option('display.width', 1000) | pandas.set_option |
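# Added, hedged sketch (not from the original file): one plausible helper in the spirit
# of this module -- plotting training/validation loss curves from a pandas DataFrame.
# The column names 'epoch', 'train_loss' and 'val_loss' are assumptions.
def plot_loss_curves(history_df, out_path=None):
    """Plot train/validation loss per epoch and optionally save the figure."""
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.plot(history_df['epoch'], history_df['train_loss'], label='train loss')
    ax.plot(history_df['epoch'], history_df['val_loss'], label='validation loss')
    ax.set_xlabel('epoch')
    ax.set_ylabel('loss')
    ax.legend()
    if out_path is not None:
        fig.savefig(out_path, bbox_inches='tight')
    return fig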
# -*- coding: utf-8 -*-
"""
Created on 15/05/2020
@author: yhagos
"""
import pandas as pd
import os
import numpy as np
import itertools
from scipy.spatial.distance import cdist
import multiprocessing as mp
pd.options.mode.chained_assignment = None
class IdentifyMarkersCoExpression:
def __init__(self, combined_cell_pos_dir, patient_id, output_dir, threshold, coexpression_proteins, num_processes=1):
self.combined_cell_pos_dir = combined_cell_pos_dir
self.patient_id = patient_id
self.output_dir = output_dir
self.threshold = threshold
self.coexpression_proteins = coexpression_proteins
self.num_processes = num_processes
def identify_co_expressing_cells(self, file_names_list, process_num):
# create output file names and check if they exist in the path
save_dir = os.path.join(self.output_dir, self.patient_id)
os.makedirs(save_dir, exist_ok=True)
for n_, file_name in enumerate(file_names_list):
print('Process:{}, Patient Id:{}, File name:{}, {}/{}'.format(process_num + 1, self.patient_id, file_name,
n_ + 1, len(file_names_list)))
msi_name = os.path.splitext(file_name)[0]
output_csv = os.path.join(save_dir, msi_name + '_co_exp.csv')
# if os.path.isfile(output_csv) :
# print('{} already exists'.format(output_csv))
# continue
cell_data_df = pd.read_csv(os.path.join(self.combined_cell_pos_dir, self.patient_id, file_name))
col_names = cell_data_df.columns
cell_data_df_copy = cell_data_df.copy()
overlap_df_all = pd.DataFrame(columns=col_names)
# overlap_markes_pos = dict()
for co_exp in self.coexpression_proteins:
co_expression_available = True
coexp_protein_list = co_exp.split('+')
# if len(coexp_protein_list) < 3:
# print('stop here')
# continue
# empty index
coexp_markers_index_database = {f"{protein}": [] for protein in coexp_protein_list}
protein_pairs = list(itertools.combinations(coexp_protein_list, 2))
for protein_pair in protein_pairs:
# print(protein_pair)
# if protein_pair[0] == 'B' and protein_pair[1] == 'C':
# print('stop here')
# protein 1 data frame
protein_1_data = cell_data_df.loc[cell_data_df['Component'] == protein_pair[0]].reset_index()
protein_1_data = protein_1_data.rename(columns={'index': 'INDEX_'})
# for more than 2 markers expression if there is data from previous computation; consider it
if coexp_markers_index_database[protein_pair[0]].__len__() != 0:
protein_1_data = protein_1_data.loc[protein_1_data['INDEX_'].isin(coexp_markers_index_database[protein_pair[0]]), :]
else:
pass
# protein 2 data frame
protein_2_data = cell_data_df.loc[cell_data_df['Component'] == protein_pair[1]].reset_index()
protein_2_data = protein_2_data.rename(columns={'index': 'INDEX_'})
if coexp_markers_index_database[protein_pair[1]].__len__() != 0:
protein_2_data = protein_2_data.loc[protein_2_data['INDEX_'].isin(coexp_markers_index_database[protein_pair[1]]), :]
else:
pass
overlap_index_input1, overlap_index_input2 = self.get_co_exp_cells_detail(protein_1_data, protein_2_data)
if overlap_index_input1.__len__() == 0:
co_expression_available = False
break
indexs_dict = dict()
indexs_dict[protein_pair[0]] = overlap_index_input1
indexs_dict[protein_pair[1]] = overlap_index_input2
coexp_markers_index_database = self.update_coexpression_database(coexp_markers_index_database, indexs_dict)
# update which is overlapping and not
if co_expression_available:
overlapping_indices = self.get_index_co_expressing_markers_position(coexp_markers_index_database)
cell_data_df_copy.loc[overlapping_indices, 'Component'] = 'co_exp'
# get overlap data
overlap_df = self.get_overlap_data(coexp_database=coexp_markers_index_database, data=cell_data_df_copy.copy())
overlap_df['Component'] = co_exp
overlap_df_all = pd.concat([overlap_df_all, overlap_df], ignore_index=True, axis=0, sort=False)
else:
pass
cell_data_df_copy.drop(columns=['Class'], inplace=True)
overlap_df_all.drop(columns=['Class'], inplace=True)
# drop all cells co-expressing different markers from cell_data_df_copy
# cell_data_df_copy.drop(cell_data_df_copy.index[cell_data_df_copy['Component'] == 'co_exp'], inplace=True)
non_overlap_df_data = cell_data_df_copy.loc[cell_data_df_copy['Component'] != 'co_exp', :]
# concatenate single marker expressing cells and co-expressing cells
# combined_df_all = pd.concat([overlap_df_all, cell_data_df_copy], ignore_index=True, axis=0, sort=False)
combined_df_all = pd.concat([overlap_df_all, non_overlap_df_data], ignore_index=True, axis=0, sort=False)
combined_df_all.to_csv(output_csv, index=False)
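# Note (added, hedged): get_co_exp_cells_detail is defined elsewhere in this class. It
# has to pair cells of two markers lying within self.threshold of each other; with the
# imported scipy cdist one minimal way to do that would be:
#     d = cdist(protein_1_data[['X', 'Y']], protein_2_data[['X', 'Y']])
#     i1, i2 = np.where(d <= self.threshold)
# The coordinate column names 'X'/'Y' are an assumption, not taken from this file.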
def get_overlap_data(self, coexp_database: dict, data: pd.DataFrame) -> pd.DataFrame:
df_overlap = | pd.DataFrame() | pandas.DataFrame |
# Reference: https://www.python.ambitious-engineer.com/archives/1630
# Reference: https://note.com/kamakiriphysics/n/n2aec5611af2a
# Reference: https://qiita.com/Gen6/items/2979b84797c702c858b1
import os
from datetime import datetime
from flask import Flask, render_template, request, redirect, url_for, send_from_directory, g, flash
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import shutil
import argparse
import pathlib
import numpy as np
from numpy import random
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.express as px
import plotly.offline as offline
from PIL import Image
import cv2
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True,force_reload=True)
import torchvision
# https://stackoverflow.com/questions/68140388/an-error-cache-may-be-out-of-date-try-force-reload-true-comes-up-even-thou
import torch.backends.cudnn as cudnn
from pathlib import Path
# Helper to delete generated graph files
def remove_glob(pathname, recursive=True):
for p in glob.glob(pathname, recursive=recursive):
if os.path.isfile(p):
os.remove(p)
# Extract a frame image from the mp4 at a given time (seconds)
def save_frame_sec(video_path, sec, result_path):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
return
os.makedirs(os.path.dirname(result_path), exist_ok=True)
fps = cap.get(cv2.CAP_PROP_FPS)
cap.set(cv2.CAP_PROP_POS_FRAMES, round(fps * sec))
ret, frame = cap.read()
if ret:
cv2.imwrite(result_path, frame)
# Object detection
def dtc_grph_label(img_ad,img_dtct,dtct_lbl,i):
img = [img_ad]
#model = torch.hub.load('ultralytics/yolov5', 'custom', path='static/yolov5s.pt',force_reload=True)
results = model(img)
# Organize the detection results for plotting
detect = results.pandas().xyxy[0]
detect['x'] = (detect.xmin + detect.xmax)/2
detect['y'] = (detect.ymin + detect.ymax)/2
detect['size'] = np.sqrt((detect.xmax - detect.xmin)*(detect.ymax - detect.ymin))
detect['frame'] = i
# Build the scatter plot of detections over the frame
fig = plt.figure(figsize=(8, 8))
# Load the frame image first so its dimensions can set the axis limits
# (reference: https://qiita.com/zaburo/items/5637b424c655b136527a)
im = Image.open(img_ad)
# Convert the image to an array; shape is (height, width, channels)
im_list = np.asarray(im)
sns.scatterplot(data=detect, x='x', y='y', hue='name',size = detect['size']*100,alpha = 0.5,sizes=(100,500))
plt.xlim(0, im_list.shape[1])
plt.ylim(im_list.shape[0], 0)
# Overlay the frame image on the plot
plt.imshow(im_list, alpha=1.0)
# Hide the axes and show the image
plt.axis("off") #https://qiita.com/tsukada_cs/items/8d31a25cd7c860690270
plt.imshow(im, alpha=0.6)
# Legend below the plot for landscape frames, to the right for portrait ones
if im_list.shape[1] > im_list.shape[0]:
plt.legend(bbox_to_anchor=(0, -0.1), loc='upper left', borderaxespad=0, fontsize=8)
else:
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0, fontsize=8)
plt.savefig(img_dtct+'/'+img_ad.split('.')[-2].split('/')[-1]+'_detect.png')
detect.to_csv(dtct_lbl+'/'+img_ad.split('.')[-2].split('/')[-1]+'_label.csv')
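# Added, hedged usage sketch (not part of the original app): running detection on a
# single frame outside Flask. The paths below are placeholders.
def _demo_single_frame(frame_path='images/frame/0000.jpg'):
    os.makedirs('images/detect', exist_ok=True)
    os.makedirs('images/labels', exist_ok=True)
    # Inside dtc_grph_label, results.pandas().xyxy[0] yields one row per detection with
    # xmin/ymin/xmax/ymax, confidence, class and name columns.
    dtc_grph_label(frame_path, 'images/detect', 'images/labels', 0)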
app = Flask(__name__)
# Limit the upload file size
# https://tanuhack.com/flask-client2server/
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024 #5MB
SAVE_DIR = "graph"
if not os.path.isdir(SAVE_DIR):
os.mkdir(SAVE_DIR)
@app.route('/graph/<path:filepath>')
def send_js(filepath):
return send_from_directory(SAVE_DIR, filepath)
@app.route("/", methods=["GET","POST"])
def upload_file():
if request.method == "GET":
return render_template("index.html")
if request.method == "POST":
image = request.files['image']
if image:
remove_glob('./upload/**')
app.logger.info('file_name={}'.format(image.filename))
app.logger.info('content_type={} content_length={}, mimetype={}, mimetype_params={}'.format(
image.content_type, image.content_length, image.mimetype, image.mimetype_params))
#imagefile_en = image.filename.encode('utf-8')
image.save("./upload/"+image.filename)
video_path = "./upload/"+image.filename
video_2_jpg_path = './images/frame'
img_dtct = './images/detect'
dtct_lbl = './images/labels'
remove_glob(video_2_jpg_path+'/**')
remove_glob(img_dtct+'/**')
# Extract video metadata
cap = cv2.VideoCapture(video_path)
video_frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
video_fps = cap.get(cv2.CAP_PROP_FPS)
video_len_sec = video_frame_count / video_fps
print('sec:',video_len_sec)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
print('width:',width)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print('height:',height)
# Delete leftover data before processing starts
remove_glob(video_2_jpg_path+'/**')
remove_glob(img_dtct+'/**')
remove_glob(dtct_lbl+'/**')
# frame → jpg → png/csv
stp = 0.5 # capture one frame every stp seconds
nomax = '{0:04d}'.format(int(len(np.arange(0,video_len_sec//1+stp,stp)))-1)
for i,sec in enumerate(np.arange(0,video_len_sec//1+stp,stp)): # iterate over the video length in seconds, rounded up
no = '{0:04d}'.format(i)
save_frame_sec(video_path, sec, video_2_jpg_path+'/'+no+'.jpg')
dtc_grph_label(video_2_jpg_path+'/'+no+'.jpg',img_dtct,dtct_lbl,i)
print(no,'/',nomax)
remove_glob(video_2_jpg_path+'/**')
# Collect the png files that make up the gif
files = sorted(glob.glob(img_dtct+'/*.png'))
images = list(map(lambda file: Image.open(file), files))
# Delete old gif files
remove_glob('./graph/**')
# Create the gif file
filepath = "./graph/" + datetime.now().strftime("%Y%m%d%H%M%S_") + "out.gif"
print(filepath)
images[0].save(filepath, save_all=True, append_images=images[1:], duration=400, loop=0)
# Extract and merge the label files
df = pd.DataFrame()
for file_path in pathlib.Path(dtct_lbl).glob('*.csv'):
f_path = pathlib.Path(file_path)
file_name = f_path.name
df_tmp = | pd.read_csv(dtct_lbl+'/'+file_name) | pandas.read_csv |
import pandas as pd
import matplotlib.pyplot as plt
from xgboost import cv
import xgboost as xgb
import joblib
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
import seaborn as sns
def plot_roc(fpr, tpr, roc_auc):
""" Plot ROC curve. """
#fig = plt.figure()
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve (area = %0.6f)' % roc_auc)
plt.legend(loc="lower right")
plt.show()
def randomised_gt(Y):
""" Get a random Y as a sanity check. """
Y = pd.DataFrame(np.random.randint(0, 2, Y.shape[0]), columns=['STK11'])
return Y
def get_train_test_data(X, df_gt, column_name, test_size, randomise_gt=False):
""" Split the data into training and test"""
Y = df_gt[column_name]
if randomise_gt:
Y = randomised_gt(Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y,
test_size=test_size,
random_state=42,
stratify=Y)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
print('total train samples:', y_train.sum())
print('total test samples', y_test.sum())
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
return dtrain, dtest, y_train, y_test
def get_params():
""" All of xgboost parameters for training. """
params = {
'learning_rate': 0.01,
'n_estimators': 1000,
'verbose': 1,
'min_child_weight': 4,
'gamma': 0.6,
'subsample': 0.8,
'colsample_bytree': 0.8,
'reg_alpha': 5e-05,
'max_depth': 10,
'objective': 'binary:logistic',
'nthread': 20,
# 'scale_pos_weight': w,
'seed': 42}
return params
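# Added, hedged sketch: how the helpers above could be wired together. X, df_gt and
# column_name are placeholders for the caller's feature matrix and ground truth;
# num_boost_round here stands in for the sklearn-style 'n_estimators' entry in get_params().
def train_and_evaluate(X, df_gt, column_name, num_boost_round=200):
    """Train an XGBoost booster and plot its ROC curve on the held-out split."""
    dtrain, dtest, _, y_test = get_train_test_data(X, df_gt, column_name, test_size=0.2)
    booster = xgb.train(get_params(), dtrain, num_boost_round=num_boost_round)
    scores = booster.predict(dtest)
    fpr, tpr, _ = roc_curve(y_test, scores)
    plot_roc(fpr, tpr, auc(fpr, tpr))
    return booster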
def plot_corr(df_rna, df_gt, column_name):
""" Plot correlation matrices. """
rs = np.random.RandomState(0)
df = | pd.concat([df_rna, df_gt[column_name]], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from IPython.core.display import HTML
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
import plotly.offline as py
import plotly.graph_objs as go
import plotly.express as px
class SalesForecaster:
"""This class creates 'easy to handle' forecaster objects
It will gather all the required variables to make the code more readable
- sales_clusters_df (pandas dataframe): The original sales dataframe
The columns are :
- product_code : string values such as CLA0 (CLA is the client and 0 is the product number)
- date : datetime64 (ns) the date of the sale such as pd.to_datetime("2018-01-02") : YYYY-MM-DD
- quantity : int64 the number of products for this sale
- Cluster : int64 the cluster the product is part of (note the capitalised column name used in the code)
- test_date (string : "2019-03-01" : YYYY-MM-DD): the training data is automatically all sales prior to this date
- max_waiting_time (string such as '7 days'): the maximum time a client is willing to wait
(required for grouping orders into batches)
- calendar_length (string such as '7 days'): the calendar window you want to zoom in on
"""
def __init__(self,
sales_clusters_df,
test_date,
max_waiting_time,
detailed_view=False,
calendar_length='7 days'
):
self.sales_clusters_df = sales_clusters_df
self.test_date = test_date
self.max_waiting_time = max_waiting_time
self.detailed_view = detailed_view
self.calendar_length = calendar_length
self.optimal_batches = []
self.predicted_batches = []
self.predictions = []
def get_predicted_batches(self):
"""This function takes the original sales df,
fits the date and quantity models at a product level, using the test_date to split the dataset
into a training dataset and a testing dataset,
generates the predicted sales,
computes the associated "predicted" batches using the max waiting time value,
computes the optimal batches using the actual data using the max waiting time value,
outputs the optimal batches df and the predicted batches df,
and 2 graphs to visualize it:
- Input:
All the inputs are encapsulated in the SalesForecaster instance:
- sales_clusters_df
- test_date
- max_waiting_time
- calendar_length
- Output:
- Main graph with optimal batches vs predicted batches for the test data
- The same graph zoomed in the week following the test date
- 1 optimal batches df
- 1 predicted batches df
"""
clusters_list = self.sales_clusters_df['Cluster'].unique()
optimal_batches = []
predicted_batches = []
predictions = []
for cluster in clusters_list:
local_optimal_batches, local_predicted_batches, local_predictions = self.\
get_cluster_level_predicted_batches(cluster)
local_optimal_batches['Cluster'] = cluster
local_predicted_batches['Cluster'] = cluster
optimal_batches.append(local_optimal_batches)
predicted_batches.append(local_predicted_batches)
predictions.append(local_predictions)
optimal_batches = pd.concat(optimal_batches)
optimal_batches.reset_index(drop=True,
inplace=True)
optimal_batches['batch_date'] = optimal_batches.batch_date.str.split(' ').apply(lambda x: x[0])
predicted_batches = pd.concat(predicted_batches)
predicted_batches.reset_index(drop=True,
inplace=True)
predicted_batches['batch_date'] = predicted_batches.batch_date.str.split(' ').apply(lambda x: x[0])
predictions = pd.concat(predictions)
predictions.reset_index(drop=True,
inplace=True)
dark_map = px.colors.qualitative.Dark2
pastel_map = px.colors.qualitative.Pastel2
fig = go.Figure()
for (cluster, dark_color, pastel_color) in zip(clusters_list, dark_map, pastel_map):
local_optimal = optimal_batches[optimal_batches['Cluster'] == cluster]
local_predicted = predicted_batches[predicted_batches['Cluster'] == cluster]
fig.add_trace(go.Bar(x=pd.to_datetime(local_optimal[local_optimal['batch_date'] > self.test_date] \
['batch_date']) - pd.Timedelta('12 hours'),
y=local_optimal[local_optimal['batch_date'] > self.test_date] \
['quantities'],
name='Cluster #{}\nOptimized batches - actual values'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=dark_color))
fig.add_trace(go.Bar(x=pd.to_datetime(local_predicted[local_predicted['batch_date'] > self.test_date] \
['batch_date']) - pd.Timedelta('12 hours'),
y=local_predicted[local_predicted['batch_date'] > self.test_date] \
['predicted_quantities'],
name='Cluster #{}\nPredicted batches'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=pastel_color))
# Edit the layout
fig.update_layout(title='Optimal batches vs predicted batches for the test period',
xaxis_title='Date',
yaxis_title='Quantities')
fig.show()
fig = go.Figure()
for (cluster, dark_color, pastel_color) in zip(clusters_list, dark_map, pastel_map):
local_optimal = optimal_batches[optimal_batches['Cluster'] == cluster]
local_predicted = predicted_batches[predicted_batches['Cluster'] == cluster]
fig.add_trace(go.Bar(x=pd.to_datetime(local_optimal[(local_optimal['batch_date'] > self.test_date) & \
(local_optimal['batch_date'] < str((pd.Timestamp(
self.test_date) + pd.Timedelta(self.calendar_length))))] \
['batch_date']) - pd.Timedelta('0 hours'),
y=local_optimal[(local_optimal['batch_date'] > self.test_date) & \
(local_optimal['batch_date'] < str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))] \
['quantities'],
name='Cluster #{}\nOptimized batches - actual values'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=dark_color,
marker_line_color='black',
marker_line_width=1.5,
opacity=0.6))
fig.add_trace(go.Bar(x=pd.to_datetime(local_predicted[(local_predicted['batch_date'] > self.test_date) & \
(local_predicted['batch_date'] < str((pd.Timestamp(
self.test_date) + pd.Timedelta(self.calendar_length))))] \
['batch_date']) - pd.Timedelta('0 hours'),
y=local_predicted[(local_predicted['batch_date'] > self.test_date) & \
(local_predicted['batch_date'] < str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))] \
['predicted_quantities'],
name='Cluster #{}\nPredicted batches'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=pastel_color,
marker_line_color='black',
marker_line_width=1.5,
opacity=0.6))
# Edit the layout
fig.update_layout(barmode='stack', xaxis_tickangle=-45,
title='Optimal batches vs predicted batches for the following week',
xaxis_title='Date',
yaxis_title='Quantities')
fig.show()
fig = go.Figure()
for (cluster, dark_color, pastel_color) in zip(clusters_list, dark_map, pastel_map):
local_optimal = optimal_batches[optimal_batches['Cluster'] == cluster]
local_predicted = predicted_batches[predicted_batches['Cluster'] == cluster]
local_predictions = predictions[predictions['Cluster'] == cluster]
if local_predictions[(local_predictions.ds > self.test_date) & (
local_predictions.ds <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))].shape[
0] > 0:
display(HTML(local_predictions[(local_predictions.ds > self.test_date) & (
local_predictions.ds <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))][
['ds', 'y', 'product_code', 'Cluster']].to_html()))
if local_predictions[(local_predictions.yhat_date > self.test_date) & (
local_predictions.yhat_date <= str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))].shape[
0] > 0:
display(HTML(local_predictions[(local_predictions.yhat_date > self.test_date) & (
local_predictions.yhat_date <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))][
['yhat_date', 'yhat_qty', 'product_code', 'Cluster']].to_html()))
fig.add_trace(go.Bar(x=pd.to_datetime(local_optimal[(local_optimal['batch_date'] > self.test_date) & \
(local_optimal['batch_date'] <= str((pd.Timestamp(
self.test_date) + pd.Timedelta(self.calendar_length))))] \
['batch_date']) - pd.Timedelta('0 hours'),
y=local_optimal[(local_optimal['batch_date'] > self.test_date) & \
(local_optimal['batch_date'] <= str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))] \
['quantities'],
name='Cluster #{}\nOptimized batches - actual values'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=dark_color,
marker_line_color='black',
marker_line_width=1.5,
opacity=0.6))
fig.add_trace(go.Bar(x=pd.to_datetime(local_predicted[(local_predicted['batch_date'] > self.test_date) & \
(local_predicted['batch_date'] <= str((pd.Timestamp(
self.test_date) + pd.Timedelta(self.calendar_length))))] \
['batch_date']) - pd.Timedelta('0 hours'),
y=local_predicted[(local_predicted['batch_date'] > self.test_date) & \
(local_predicted['batch_date'] <= str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))] \
['predicted_quantities'],
name='Cluster #{}\nPredicted batches'.format(cluster),
width=1e3 * pd.Timedelta('6 hours').total_seconds(),
marker_color=pastel_color,
marker_line_color='black',
marker_line_width=1.5,
opacity=0.6))
fig.add_trace(go.Scatter(x=pd.to_datetime(local_predictions[(local_predictions.ds > self.test_date) & (
local_predictions.ds <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))]['ds']),
y=local_predictions[
(local_predictions.ds > self.test_date) & (local_predictions.ds <= str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))]['y'],
marker=dict(
color=dark_color,
size=10,
line=dict(
color='white',
width=2
)
),
mode='markers',
name='actual_sales'))
fig.add_trace(go.Scatter(x=pd.to_datetime(local_predictions[(local_predictions.yhat_date > self.test_date) & (
local_predictions.yhat_date <= str((pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))][
'yhat_date']),
y=local_predictions[(local_predictions.yhat_date > self.test_date) & (
local_predictions.yhat_date <= str(
(pd.Timestamp(self.test_date) + pd.Timedelta(self.calendar_length))))]['yhat_qty'],
marker=dict(
color=pastel_color,
size=10,
line=dict(
color='white',
width=2
)
),
mode='markers',
name='predicted_sales'))
# Edit the layout
fig.update_layout(barmode='stack', xaxis_tickangle=-45,
title='Optimal batches vs predicted batches for the following week \nPLUS product_code level sales (predicted and actual)',
xaxis_title='Date',
yaxis_title='Quantities')
fig.show()
local_predictions = predictions[predictions['ds'] > self.test_date]
sns.set(style="white")
# Show the joint distribution using kernel density estimation
g = sns.jointplot(
pd.Series(local_predictions['error_days'].values / (24 * 60 * 60 * 1e9), name='error_days\nin days'),
| pd.Series(local_predictions['error_quantities'].values, name='error_quantities\nin%') | pandas.Series |
from opentrons import robot, containers, instruments
from datetime import datetime
import numpy as np
import pandas as pd
import getch
import shutil
import os
import sys
def initialize_pipettes(p10_tipracks,p10s_tipracks,p200_tipracks,trash):
# Declare all of the pipettes
p10 = instruments.Pipette(
axis='a',
max_volume=10,
min_volume=0.5,
tip_racks=p10_tipracks,
trash_container=trash,
channels=8,
name='p10-8',
aspirate_speed=400,
dispense_speed=800
)
p10s = instruments.Pipette(
axis='a',
max_volume=10,
min_volume=0.5,
tip_racks=p10s_tipracks,
trash_container=trash,
channels=1,
name='p10-8s',
aspirate_speed=400,
dispense_speed=800
)
p200 = instruments.Pipette(
axis='b',
max_volume=200,
min_volume=20,
tip_racks=p200_tipracks,
trash_container=trash,
channels=1,
name='p200-1',
aspirate_speed=400,
dispense_speed=800
)
return p10,p10s,p200
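# Hedged usage note (added): the tip-rack and trash arguments are opentrons (legacy API)
# container objects, e.g. containers.load('tiprack-10ul', 'E1') for a tip rack and
# containers.load('trash-box', 'D1') for the trash; the exact deck slots are assumptions.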
def display_deck(robot):
df = pd.DataFrame(np.zeros((3,5)), columns=['A','B','C','D','E'], index=['3','2','1'])
df.loc[:,:] = "---"
for slot in robot.deck:
for child in slot.get_children_list():
print(slot.get_name()[0],slot.get_name()[1],child.get_name())
df.loc[slot.get_name()[1],slot.get_name()[0]] = child.get_name()
print(df)
def print_layout(locations):
# Generate an empty dataframe with the right shape
layout_table = pd.DataFrame(np.zeros((3,5)), columns=['A','B','C','D','E'], index=['3','2','1'])
layout_table.loc[:,:] = "---"
# Fill in the data frame with the locations
for obj in locations:
layout_table.loc[locations[obj][1], locations[obj][0]] = obj
# Displays the required plate map and waits to proceed
print("\n Please arrange the items in the following configuration: \n")
print(layout_table,"\n")
input("Press enter to continue")
def change_speed(robot):
robot.head_speed(5000)
def change_height(pipette,container,target,recalibrate=False):
counter = 0
z = 0
print("Change height - s-g:up h-l:down x:exit")
while True:
c = getch.getch()
if c == "s":
print("Up 20mm")
pipette.robot._driver.move(z=20,mode="relative")
z += 20
elif c == "d":
print("Up 5mm")
pipette.robot._driver.move(z=5,mode="relative")
z += 5
elif c == "f":
print("Up 0.5mm")
pipette.robot._driver.move(z=0.5,mode="relative")
z += 0.5
elif c == "g":
print("Up 0.1mm")
pipette.robot._driver.move(z=0.1,mode="relative")
z += 0.1
elif c == "h":
print("Down 0.1mm")
pipette.robot._driver.move(z=-0.1,mode="relative")
z += -0.1
elif c == "j":
print("Down 0.5mm")
pipette.robot._driver.move(z=-0.5,mode="relative")
z += -0.5
elif c == "k":
print("Down 5mm")
pipette.robot._driver.move(z=-5,mode="relative")
z += -5
elif c == "l":
print("Down 20mm")
pipette.robot._driver.move(z=-20,mode="relative")
z += -20
elif c == "x":
print("Exit")
break
counter += 1
pipette.calibrate_position((container,target.from_center(x=0, y=0, z=-1,reference=container)))
if recalibrate:
if counter > 1:
print("Will recalibrate")
redo = True
else:
print("Calibrated")
redo = False
return redo,z
else:
return z
def well_addresses():
'''Generates a list of well address A1-H12'''
letter = ["A","B","C","D","E","F","G","H"]
number = ["1","2","3","4","5","6","7","8","9","10","11","12"]
target_well = []
temp_well = 0
for n in number:
for l in letter:
temp_well = l + n
target_well.append(temp_well)
return target_well
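# Added, hedged sanity check: the loops above walk columns first (A1..H1, A2..H2, ...),
# so a standard 96-well plate yields exactly 96 addresses.
def _check_well_addresses():
    wells = well_addresses()
    assert len(wells) == 96
    assert wells[:3] == ['A1', 'B1', 'C1']
    return wells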
def print_center(statement):
columns = shutil.get_terminal_size().columns
print('\n',statement.center(columns))
def request_info(statement,type='string',length=0,select_from=[]):
answer = input(statement)
if answer == '':
print("Please enter a value\n")
return request_info(statement,type=type)
elif type == 'int':
try:
int(answer)
return int(answer)
except:
print("Invalid type\n")
return request_info(statement,type=type)
elif type == 'list':
try:
nums = [int(num) for num in answer.split(' ')]
if len(nums) != length:
print('Requires {} inputs'.format(length))
return request_info(statement,type=type,length=length)
return [int(num) for num in answer.split(' ')]
except:
print("Invalid type\n")
return request_info(statement,type=type,length=length)
if select_from != []:
if answer not in select_from:
print('Not in list')
print(select_from)
return request_info(statement,type=type,select_from=select_from)
else:
return answer
return answer
def make_directory(path):
dir_name = path.split("/")[-1]
if os.path.exists(path):
print("Directory {} already exists".format(dir_name))
else:
# Generates a new directory with the ID# as its name
os.makedirs(path)
print("Making directory for {}".format(dir_name))
def check_robot():
try:
robot_name = str(os.environ["ROBOT_DEV"][-5:])
except:
sys.exit("Not connected to a robot, run roboswitch <robot_name> to change the robot")
robot_number = int(request_info("Run on this robot: {} ? 1-Yes, 2-No ".format(robot_name),type='int'))
if robot_number == 1:
print("Proceeding with run")
else:
sys.exit("Run `roboswitch <robot_name>` to change the robot")
def list_to_string(ls):
string = ''
for l in ls:
string += "'{}',".format(l)
return string[:-1]
def query_for_parts(status,enzyme,engine):
query = "SELECT parts.part_id,parts.status,fragments.fragment_name,plates.plate_id,wells.address,wells.volume,plates.id FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
INNER JOIN wells ON fragments.id = wells.fragment_id\
INNER JOIN plates on wells.plate_id = plates.id\
WHERE parts.status IN ({})\
AND parts.cloning_enzyme = '{}'".format(list_to_string(status),enzyme)
return pd.read_sql_query(query, con=engine)
def query_for_plates(parts,engine):
query = "SELECT parts.part_id,fragments.fragment_name,plates.plate_id,wells.address,wells.volume,plates.id,plates.plate_name FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
INNER JOIN wells ON fragments.id = wells.fragment_id\
INNER JOIN plates on wells.plate_id = plates.id\
WHERE parts.part_id IN ({})".format(list_to_string(parts))
return pd.read_sql_query(query, con=engine)
def query_for_fragments(parts,engine):
query = "SELECT parts.part_id,fragments.fragment_name FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id\
WHERE parts.part_id IN ({})".format(list_to_string(parts))
return pd.read_sql_query(query, con=engine)
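# Added, hedged usage sketch: the query helpers above expect a SQLAlchemy engine.
# The connection string, status value and enzyme name below are placeholders only.
def _demo_part_queries(connection_string='postgresql://user:pass@localhost/parts_db'):
    from sqlalchemy import create_engine
    engine = create_engine(connection_string)
    parts_df = query_for_parts(status=['received'], enzyme='BbsI', engine=engine)
    plates_df = query_for_plates(parts_df.part_id.unique().tolist(), engine)
    return parts_df, plates_df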
def query_everything(engine):
print()
print(datetime.now(),'Began run')
query_outcomes = "SELECT parts.part_id,parts.status,wells.seq_outcome,wells.plate_type,builds.build_name,wells.misplaced FROM parts \
INNER JOIN wells ON parts.id = wells.part_id\
INNER JOIN plates ON wells.plate_id = plates.id\
INNER JOIN builds ON plates.build_id = builds.id"
query_frag = "SELECT parts.part_id,fragments.fragment_name FROM parts\
INNER JOIN part_frag ON parts.id = part_frag.part_id\
INNER JOIN fragments ON part_frag.fragment_id = fragments.id"
query_parts = "SELECT * FROM parts"
df_frag = pd.read_sql_query(query_frag, con=engine)
frags = df_frag.groupby('part_id')['fragment_name'].agg(len)
frags.name = 'Count'
frags = pd.DataFrame(frags).reset_index()
frags_dict = dict(zip(frags.part_id.tolist(),frags.Count.tolist()))
# subs_dict = dict(zip(df_frag.part_id.tolist(),df_frag.sub_name.tolist()))
print(datetime.now(),'Finished analyzing fragments')
def multiple(x):
if len(x) == 1:
x.append('N/A')
return x
def find_outcome(x):
if x in df_out_dict.keys():
return df_out_dict[x]
else:
return ['N/A','N/A']
def find_build(x):
if x in df_build_dict.keys():
return df_build_dict[x]
else:
return ['N/A','N/A']
def simplify_outcome(x):
if "mutation" in x:
return 'cloning_mutation'
elif "bad" in x:
return 'sequence_failure'
else:
return x
df_res = | pd.read_sql_query(query_outcomes, con=engine) | pandas.read_sql_query |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 06 14:25:48 2016
@author: vskritsk
"""
import pandas as pd
import numpy as np
import os
pd.set_option('display.expand_frame_repr', False)
| pd.set_option('max_rows', 280) | pandas.set_option |
import pandas as pd
import networkx as nx
import warnings
import seaborn as sns
import numpy as np
import matplotlib.patches as mpatches
import microbe_directory as md
from capalyzer.packet_parser import DataTableFactory, NCBITaxaTree, annotate_taxa, TaxaTree
from capalyzer.packet_parser.data_utils import group_small_cols
from capalyzer.packet_parser.diversity_metrics import (
shannon_entropy, richness, chao1, rarefaction_analysis
)
from sklearn.decomposition import PCA
from scipy.cluster.hierarchy import linkage, cophenet, leaves_list
from scipy.spatial.distance import squareform, pdist, jensenshannon
from os.path import join
from metasub_utils.packet_parse import MetaSUBTableFactory
from capalyzer.packet_parser.experimental import umap
from capalyzer.packet_parser.data_utils import group_small_cols
from capalyzer.packet_parser.normalize import proportions, prevalence
from plotnine import *
from scipy.cluster.hierarchy import fcluster
from matplotlib import pyplot as plt
from capalyzer.constants import MICROBE_DIR
from .figs_data import MetaSUBFiguresData
class MetaSUBFigures(MetaSUBFiguresData):
def tbl1(self):
"""Return a pandas dataframe listing where and when samples were collected."""
tbl = self.meta.copy()
tbl = tbl.loc[tbl['control_type'].isna()]
tbl = tbl.loc[~tbl['city'].isna()]
tbl = tbl.query('city != "other"')
tbl = pd.crosstab(tbl['city'], tbl['project'])
tbl['Region'] = self.meta.groupby('city').apply(lambda x: x['continent'].iloc[0])
tbl['Region'] = tbl['Region'].str.replace('_', ' ').str.title()
tbl.index = tbl.index.str.replace('_', ' ').str.title()
tbl = tbl.set_index('Region', append=True)
tbl = tbl.reorder_levels(['Region', 'city'])
tbl = tbl.sort_index()
other_projs = list(tbl.columns[tbl.sum(axis=0) < 100]) + ['PATHOMAP_WINTER']
tbl['Other'] = tbl[other_projs].sum(axis=1)
tbl = tbl.drop(columns=other_projs)
tbl['Total'] = tbl.sum(axis=1)
tbl = tbl[['PILOT', 'CSD16', 'CSD17', 'Other', 'Total']] # column order
continent_totals = tbl.groupby(level=0).sum()
continent_totals['city'] = 'AAA Region Total' # AAA so sort puts these first
continent_totals = continent_totals.set_index('city', append=True)
tbl = pd.concat([tbl, continent_totals]).sort_index()
ctrl = self.meta.copy()
ctrl = ctrl.loc[~ctrl['control_type'].isna()]
ctrl = pd.crosstab(ctrl['control_type'], ctrl['project'])
ctrl.index.names = ['city']
ctrl['Region'] = 'Control'
ctrl = ctrl.set_index('Region', append=True)
ctrl = ctrl.reorder_levels(['Region', 'city'])
other_projs = ctrl.columns[ctrl.sum(axis=0) < 10]
ctrl['Other'] = ctrl[other_projs].sum(axis=1)
ctrl = ctrl.drop(columns=other_projs)
ctrl['Total'] = ctrl.sum(axis=1)
cols = [
col for col in ['PILOT', 'CSD16', 'CSD17', 'Other', 'Total']
if col in ctrl.columns
]
ctrl = ctrl[cols]
tbl = pd.concat([ctrl, tbl])
tbl.index = tbl.index.set_levels(tbl.index.levels[1].str.replace('AAA', ''), level=1)
return tbl
def fig1(self, N=75):
"""Figure showing the major taxa found in the metasub data."""
return [
self.fig1_core_taxa_tree(),
self.fig1_prevalence_curve(),
self.fig1_major_taxa_curves(N=N),
self.fig1_species_rarefaction(),
self.fig1_reference_comparisons(),
self.fig1_fraction_unclassified(),
]
def fig1_core_taxa_tree(self):
"""Return an ETE tree showing core taxa with annotations."""
def fig1_prevalence_curve(self):
"""Return a P9 figure showing the distribution of species prevalences."""
prev = pd.DataFrame({
'total': prevalence(self.wide_taxa),
'city': self.wide_taxa.groupby(by=self.meta['city']).apply(prevalence).mean(axis=0),
})
prev['taxa'] = prev.index
prev_flat = prev.melt(id_vars='taxa')
plot = (
ggplot(prev_flat, aes(x='value', color='variable', fill='variable')) +
geom_density(size=2, alpha=0.2) +
theme_minimal() +
xlab('Species Prevalence') +
ylab('Density') +
geom_vline(xintercept=0.25, color='black') +
geom_vline(xintercept=0.70, color='black') +
geom_vline(xintercept=0.95, color='black') +
annotate(geom='label', x=0.65, y=2.9, label="Sub-Core 70-95% (1,084)", size=20) +
annotate(geom='label', x=0.33, y=3.5, label="Peripheral, < 25% (2,466)", size=20) +
annotate(geom='label', x=0.78, y=3.2, label="Core > 95% (61)", size=20) +
scale_color_brewer(type='qualitative', palette=6, direction=1) +
scale_fill_brewer(type='qualitative', palette=6, direction=1) +
theme(
text=element_text(size=20),
axis_text_x=element_text(angle=0, hjust=1),
figure_size=(8, 8),
legend_position='none',
)
)
return plot
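# Note (added, hedged): `prevalence` comes from capalyzer and is, in effect, the fraction
# of samples in which a species is detected. The vertical lines above split taxa into
# peripheral (<25%), sub-core (70-95%) and core (>95%) prevalence bands, matching the
# panel annotations.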
def fig1_major_taxa_curves(self, N=75):
"""Return two P9 panels showing prevalence and abundance distributions of major taxa."""
taxa = self.wide_taxa_rel
city = taxa.groupby(by=self.meta['city']).median()
top_taxa = taxa.mean().sort_values(ascending=False)[:N].index
taxa, city = 1000 * 1000 * taxa[top_taxa], 1000 * 1000 * city[top_taxa]
taxa_prev, city_prev = prevalence(taxa), prevalence(city)
taxa_prev = pd.DataFrame({'taxon': taxa_prev.index, 'prevalence': taxa_prev, 'names': taxa_prev.index})
city_prev = pd.DataFrame({'taxon': city_prev.index, 'prevalence': city_prev, 'names': city_prev.index})
taxa_mean, taxa_kurtosis, taxa_sd = taxa.mean(), taxa.kurtosis(), taxa.std()
def add_stats(taxon):
m, k, sd = taxa_mean[taxon], taxa_kurtosis[taxon], taxa_sd[taxon]
return f'{taxon} ({m // 1000:.0f}k, {sd // 1000:.0f}k, {k:.0f})'
taxa.columns = taxa.columns.to_series().apply(add_stats)
city.columns = city.columns.to_series().apply(add_stats)
top_taxa_stat = top_taxa.to_series().apply(add_stats)
taxa, city = taxa.melt(), city.melt()
taxa['variable'] = pd.Categorical(taxa['variable'], categories=top_taxa_stat)
city['variable'] = | pd.Categorical(city['variable'], categories=top_taxa_stat) | pandas.Categorical |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
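# Added, hedged example (not in the original suite): abs_tol and rel_tol can be combined;
# a difference passes when it is roughly within abs_tol + rel_tol * |b|.
def test_numeric_columns_equal_abs_and_rel():
    data = """a|b|expected
1|1.05|True
3|4|False
4|NULL|False
NULL|NULL|True"""
    df = pd.read_csv(io.StringIO(data), sep="|")
    actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.1, rel_tol=0.1)
    expect_out = df["expected"]
    assert_series_equal(expect_out, actual_out, check_names=False)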
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "great"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.subset()
assert mock_info.called_with("Sample c mismatch: a: 2, df1: yo, df2: great")
def test_large_subset():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1[["a", "b"]].sample(50).copy()
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.matches()
assert comp.subset()
def test_string_joiner():
df1 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
df2 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
compare = datacompy.Compare(df1, df2, "ab")
assert compare.matches()
def test_decimal_with_joins():
df1 = pd.DataFrame([{"a": Decimal("1"), "b": 2}, {"a": Decimal("2"), "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_nulls():
df1 = pd.DataFrame([{"a": 1, "b": Decimal("2")}, {"a": 2, "b": Decimal("2")}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}, {"a": 3, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert not compare.matches()
assert compare.all_columns_match()
assert not compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_index_joining():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_strings_i_guess():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_non_overlapping():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.intersect_rows_match()
assert len(compare.df1_unq_rows) == 0
assert len(compare.df2_unq_rows) == 1
assert list(compare.df2_unq_rows["a"]) == ["back fo mo"]
def test_temp_column_name():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
def test_temp_column_name_one_has():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have_multiple():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_2"
def test_temp_column_name_one_already():
df1 = pd.DataFrame([{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
### Duplicate testing!
def test_simple_dupes_one_field():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_two_fields():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a", "b"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals_no_match():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert not compare.matches()
assert len(compare.df1_unq_rows) == 1
assert len(compare.df2_unq_rows) == 1
assert len(compare.intersect_rows) == 1
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_three_to_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert not compare.matches()
assert len(compare.df1_unq_rows) == 1
assert len(compare.df2_unq_rows) == 0
assert len(compare.intersect_rows) == 2
# Just render the report to make sure it renders.
t = compare.report()
def test_dupes_from_real_data():
data = """acct_id,acct_sfx_num,trxn_post_dt,trxn_post_seq_num,trxn_amt,trxn_dt,debit_cr_cd,cash_adv_trxn_comn_cntry_cd,mrch_catg_cd,mrch_pstl_cd,visa_mail_phn_cd,visa_rqstd_pmt_svc_cd,mc_pmt_facilitator_idn_num
100,0,2017-06-17,1537019,30.64,2017-06-15,D,CAN,5812,M2N5P5,,,0.0
200,0,2017-06-24,1022477,485.32,2017-06-22,D,USA,4511,7114,7.0,1,
100,0,2017-06-17,1537039,2.73,2017-06-16,D,CAN,5812,M4J 1M9,,,0.0
200,0,2017-06-29,1049223,22.41,2017-06-28,D,USA,4789,21211,,A,
100,0,2017-06-17,1537029,34.05,2017-06-16,D,CAN,5812,M4E 2C7,,,0.0
200,0,2017-06-29,1049213,9.12,2017-06-28,D,CAN,5814,0,,,
100,0,2017-06-19,1646426,165.21,2017-06-17,D,CAN,5411,M4M 3H9,,,0.0
200,0,2017-06-30,1233082,28.54,2017-06-29,D,USA,4121,94105,7.0,G,
100,0,2017-06-19,1646436,17.87,2017-06-18,D,CAN,5812,M4J 1M9,,,0.0
200,0,2017-06-30,1233092,24.39,2017-06-29,D,USA,4121,94105,7.0,G,
100,0,2017-06-19,1646446,5.27,2017-06-17,D,CAN,5200,M4M 3G6,,,0.0
200,0,2017-06-30,1233102,61.8,2017-06-30,D,CAN,4121,0,,,
100,0,2017-06-20,1607573,41.99,2017-06-19,D,CAN,5661,M4C1M9,,,0.0
200,0,2017-07-01,1009403,2.31,2017-06-29,D,USA,5814,22102,,F,
100,0,2017-06-20,1607553,86.88,2017-06-19,D,CAN,4812,H2R3A8,,,0.0
200,0,2017-07-01,1009423,5.5,2017-06-29,D,USA,5812,2903,,F,
100,0,2017-06-20,1607563,25.17,2017-06-19,D,CAN,5641,M4C 1M9,,,0.0
200,0,2017-07-01,1009433,214.12,2017-06-29,D,USA,3640,20170,,A,
100,0,2017-06-20,1607593,1.67,2017-06-19,D,CAN,5814,M2N 6L7,,,0.0
200,0,2017-07-01,1009393,2.01,2017-06-29,D,USA,5814,22102,,F,"""
df1 = pd.read_csv(io.StringIO(data), sep=",")
df2 = df1.copy()
compare_acct = datacompy.Compare(df1, df2, join_columns=["acct_id"])
assert compare_acct.matches()
compare_unq = datacompy.Compare(
df1, df2, join_columns=["acct_id", "acct_sfx_num", "trxn_post_dt", "trxn_post_seq_num"]
)
assert compare_unq.matches()
# Just render the report to make sure it renders.
t = compare_acct.report()
r = compare_unq.report()
def test_strings_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": "hi", "b": " A"}, {"a": "bye", "b": "A"}])
df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "A "}])
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins_with_ignore_case():
df1 = pd.DataFrame([{"a": "hi", "b": "a"}, {"a": "bye", "b": "A"}])
df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "a"}])
compare = datacompy.Compare(df1, df2, "a", ignore_case=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_case=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": 1, "b": " A"}, {"a": 2, "b": "A"}])
df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "A "}])
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_joins_with_ignore_case():
df1 = pd.DataFrame([{"a": 1, "b": "a"}, {"a": 2, "b": "A"}])
df2 = | pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "a"}]) | pandas.DataFrame |
import pandas as pd
import yfinance as yf
def Coletar_Fundamentos(Tickers):
"""
    Collects fundamental indicators by reading the HTML tables of the
    Fundamentus website.
    Arguments:
    Tickers = string or list of tickers
"""
df3 = pd.DataFrame(index=['P/L', 'P/VP', 'P/EBIT', 'PSR', 'P/Ativos', 'P/Cap. Giro',
'P/Ativ Circ Liq', 'Div. Yield', 'EV / EBITDA', 'EV / EBIT',
'Cres. Rec (5a)', 'LPA', 'VPA', 'Marg. Bruta', 'Marg. EBIT',
'Marg. Líquida', 'EBIT / Ativo', 'ROIC', 'ROE', 'Liquidez Corr',
'Div Br/ Patrim', 'Giro Ativos'])
if type(Tickers) is str:
Tickers = [Tickers]
for Ticker in Tickers:
df = | pd.read_html(f"http://www.fundamentus.com.br/detalhes.php?papel={Ticker}") | pandas.read_html |
# RHR Online Anomaly Detection & Alert Monitoring
######################################################
# Author: <NAME> #
# Email: <EMAIL> #
# Location: Dept.of Genetics, Stanford University #
# Date: Oct 29 2020 #
######################################################
# uses raw heart rate and steps data (this steps data doesn't have zeroes and needs to be inferred from the hr datetime stamp)
## simple command
# python rhrad_online_alerts.py --heart_rate hr.csv --steps steps.csv
## full command
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv --steps pbb_fitbit_oldProtocol_steps.csv --myphd_id pbb_RHR_online --figure1 pbb_RHR_online_anomalies.pdf --anomalies pbb_RHR_online_anomalies.csv --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 --outliers_fraction 0.1 --random_seed 10 --baseline_window 744 --sliding_window 1 --alerts pbb_RHR_online_alerts.csv --figure2 pbb_RHR_online_alerts.pdf
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv \
# --steps pbb_fitbit_oldProtocol_steps.csv \
# --myphd_id pbb_RHR_online \
# --figure1 pbb_RHR_online_anomalies.pdf \
# --anomalies pbb_RHR_online_anomalies.csv \
# --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 \
# --outliers_fraction 0.1 \
# --random_seed 10 \
# --baseline_window 744 --sliding_window 1
# --alerts pbb_RHR_online_alerts.csv \
# --figure2 pbb_RHR_online_alerts.pdf
import warnings
warnings.filterwarnings('ignore')
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%matplotlib inline
import seaborn as sns
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
####################################
parser = argparse.ArgumentParser(description='Find anomalies in wearables time-series data.')
parser.add_argument('--heart_rate', metavar='', help ='raw heart rate count with a header = heartrate')
parser.add_argument('--steps',metavar='', help ='raw steps count with a header = steps')
parser.add_argument('--myphd_id',metavar='', default = 'myphd_id', help ='user myphd_id')
parser.add_argument('--anomalies', metavar='', default = 'myphd_id_anomalies.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure1', metavar='', default = 'myphd_id_anomalies.pdf', help='save predicted anomalies as a PDF file')
parser.add_argument('--symptom_date', metavar='', default = 'NaN', help = 'symptom date with y-m-d format')
parser.add_argument('--diagnosis_date', metavar='', default = 'NaN', help='diagnosis date with y-m-d format')
parser.add_argument('--outliers_fraction', metavar='', type=float, default=0.1, help='fraction of outliers or anomalies')
parser.add_argument('--random_seed', metavar='', type=int, default=10, help='random seed')
parser.add_argument('--baseline_window', metavar='',type=int, default=744, help='baseline window is used for training (in hours)')
parser.add_argument('--sliding_window', metavar='',type=int, default=1, help='sliding window is used to slide the testing process each hour')
parser.add_argument('--alerts', metavar='', default = 'myphd_id_alerts.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure2', metavar='', default = 'myphd_id_alerts.pdf', help='save predicted anomalies as a PDF file')
args = parser.parse_args()
# as arguments
fitbit_oldProtocol_hr = args.heart_rate
fitbit_oldProtocol_steps = args.steps
myphd_id = args.myphd_id
myphd_id_anomalies = args.anomalies
myphd_id_figure1 = args.figure1
symptom_date = args.symptom_date
diagnosis_date = args.diagnosis_date
RANDOM_SEED = args.random_seed
outliers_fraction = args.outliers_fraction
baseline_window = args.baseline_window
sliding_window = args.sliding_window
myphd_id_alerts = args.alerts
myphd_id_figure2 = args.figure2
####################################
class RHRAD_online:
# Infer resting heart rate ------------------------------------------------------
def resting_heart_rate(self, heartrate, steps):
"""
This function uses heart rate and steps data to infer resting heart rate.
        It keeps heart rate measurements for which the step count is zero over the preceding 12 minutes.
"""
# heart rate data
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
# steps data
df_steps = pd.read_csv(fitbit_oldProtocol_steps)
df_steps = df_steps.set_index('datetime')
df_steps.index.name = None
df_steps.index = pd.to_datetime(df_steps.index)
# merge dataframes
#df_hr = df_hr.resample('1min').mean()
#df_steps = df_steps.resample('1min').mean()
        # added "outer" parameter to the merge function to adjust the script to the new steps format
#df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True)
df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True, how="outer")
df1 = df1[pd.isnull(df1).any(axis=1)].fillna(0)
df1 = df1.rename(columns={"value_x": "heartrate", "value_y": "steps"})
df1 = df1.resample('1min').mean()
print(myphd_id)
        print("Data size (in minutes) before removing missing data")
print(df1.shape)
ax = df1.plot(figsize=(20,4), title=myphd_id)
ax.figure.savefig(myphd_id+'_data.png')
#print(df1)
df1 = df1.dropna(how='any')
df1 = df1.loc[df1['heartrate']!=0]
        print("Data size (in minutes) after removing missing data")
print(df1.shape)
#print(df1)
# define RHR as the HR measurements recorded when there were less than two steps taken during a rolling time window of the preceding 12 minutes (including the current minute)
df1['steps'] = df1['steps'].apply(np.int64)
df1['steps_window_12'] = df1['steps'].rolling(12).sum()
df1 = df1.loc[(df1['steps_window_12'] == 0 )]
print(df1['heartrate'].describe())
print(df1['steps_window_12'].describe())
# impute missing data
#df1 = df1.resample('1min').mean()
#df1 = df1.ffill()
print("No.of timesteps for RHR (in minutes)")
print(df1.shape)
return df1
# Pre-processing ------------------------------------------------------
def pre_processing(self, resting_heart_rate):
"""
This function takes resting heart rate data and applies moving averages to smooth the data and
        downsamples to one hour by taking the average values
"""
# smooth data
df_nonas = df1.dropna()
df1_rom = df_nonas.rolling(400).mean()
# resample
df1_resmp = df1_rom.resample('1H').mean()
df2 = df1_resmp.drop(['steps'], axis=1)
df2 = df2.dropna()
print("No.of timesteps for RHR (in hours)")
print(df2.shape)
return df2
# Seasonality correction ------------------------------------------------------
def seasonality_correction(self, resting_heart_rate, steps):
"""
        This function takes the output of pre-processing and applies seasonality correction
"""
sdHR_decomposition = seasonal_decompose(sdHR, model='additive', freq=1)
sdSteps_decomposition = seasonal_decompose(sdSteps, model='additive', freq=1)
sdHR_decomp = pd.DataFrame(sdHR_decomposition.resid + sdHR_decomposition.trend)
sdHR_decomp.rename(columns={sdHR_decomp.columns[0]:'heartrate'}, inplace=True)
sdSteps_decomp = pd.DataFrame(sdSteps_decomposition.resid + sdSteps_decomposition.trend)
sdSteps_decomp.rename(columns={sdSteps_decomp.columns[0]:'steps_window_12'}, inplace=True)
frames = [sdHR_decomp, sdSteps_decomp]
data = pd.concat(frames, axis=1)
#print(data)
#print(data.shape)
return data
# Train model and predict anomalies ------------------------------------------------------
def online_anomaly_detection(self, data_seasnCorec, baseline_window, sliding_window):
"""
# split the data, standardize the data inside a sliding window
# parameters - 1 month baseline window and 1 hour sliding window
# fit the model and predict the test set
"""
for i in range(baseline_window, len(data_seasnCorec)):
data_train_w = data_seasnCorec[i-baseline_window:i]
# train data normalization ------------------------------------------------------
data_train_w += 0.1
standardizer = StandardScaler().fit(data_train_w.values)
data_train_scaled = standardizer.transform(data_train_w.values)
data_train_scaled_features = pd.DataFrame(data_train_scaled, index=data_train_w.index, columns=data_train_w.columns)
data = pd.DataFrame(data_train_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_train_w = data_1
data_train.append(data_train_w)
data_test_w = data_seasnCorec[i:i+sliding_window]
# test data normalization ------------------------------------------------------
data_test_w += 0.1
data_test_scaled = standardizer.transform(data_test_w.values)
data_scaled_features = pd.DataFrame(data_test_scaled, index=data_test_w.index, columns=data_test_w.columns)
data = pd.DataFrame(data_scaled_features)
data_1 = pd.DataFrame(data).fillna(0)
data_1['steps'] = '0'
data_1['steps_window_12'] = (data_1['steps'])
data_test_w = data_1
data_test.append(data_test_w)
# fit the model ------------------------------------------------------
model = EllipticEnvelope(random_state=RANDOM_SEED,
support_fraction=0.7,
contamination=outliers_fraction).fit(data_train_w)
# predict the test set
preds = model.predict(data_test_w)
#preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
dfs.append(preds)
# Merge predictions ------------------------------------------------------
def merge_test_results(self, data_test):
"""
Merge predictions
"""
# concat all test data (from sliding window) with their datetime index and others
data_test = pd.concat(data_test)
# merge predicted anomalies from test data with their corresponding index and other features
preds = pd.DataFrame(dfs)
preds = preds.rename(lambda x: 'anomaly' if x == 0 else x, axis=1)
data_test_df = | pd.DataFrame(data_test) | pandas.DataFrame |
################################################################################
# This module retrieves synonyms from Wordnet as a part of NLTK module and its
# corpus. The module recognizes each input pandas.DataFrame record as a unit of
# assessment content (i.e. a single passage section, an item stem,
# or an item option) and applies a serial number of 'AC_Doc_ID' to each
# output record for the following processing.
# Parameters df_ac: input pandas.DataFrame, it should have, at least, one
# column of lemmatized text assessment content
# content_lemma_column: column name of lemmatized text assessment
# content (as an output text from the
# lemmatizer) to search Wordnet with the lemmas
# lang = 'En' : Language option ('En' or 'Jp')
# wnjpn_dic = None: dictionary of Japanese WordNet ('Jp' only)
# Returns Result: pandas.DataFrame including the original columns of the input
# DataFrame plus result synsets, the result synsets include
# the original input lemmas as well
################################################################################
def ac_synset(df_ac, content_lemma_column, lang = 'En', wnjpn_dic = None):
import pandas as pd
import numpy as np
if lang != 'Jp' or wnjpn_dic == None:
import nltk
from nltk.corpus import wordnet as wn
df_ac_buf = df_ac.copy()
list_cntnt = list(df_ac_buf[content_lemma_column])
list_cntnt_synset = list_cntnt[:]
list_doc_id = list_cntnt[:]
df_synset_all = pd.DataFrame()
for i, x in enumerate(list_cntnt):
if lang == 'Jp':
tokens = x.split(' ')
synset_list = tokens
for y in tokens:
if y in wnjpn_dic:
synset_list = synset_list + wnjpn_dic[y]
#s = ' '.join(synset_list)
s = ' '.join(map(str, synset_list))
list_cntnt_synset[i] = s
print(s)
df_synset = pd.DataFrame({ 'Synset' : synset_list })
else:
tokens = nltk.word_tokenize(x)
synset_list = tokens
for y in tokens:
for synset in wn.synsets(y):
synset_list = synset_list + synset.lemma_names()
s = ' '.join(map(str, synset_list))
list_cntnt_synset[i] = s
print(s)
lower_synset_list = [w.lower() for w in synset_list]
df_synset = pd.DataFrame({ 'Synset' : lower_synset_list })
df_doc = pd.DataFrame({ 'AC_Doc_ID' : np.array([i] * len(df_synset)) })
df_synset['AC_Doc_ID'] = df_doc['AC_Doc_ID']
df_synset['Dummy'] = df_doc['AC_Doc_ID']
df_synset_all = df_synset_all.append(df_synset)
list_doc_id[i] = i
df_doc_id = pd.DataFrame({ 'AC_Doc_ID' : list_doc_id })
df_ac_buf['AC_Doc_ID'] = df_doc_id['AC_Doc_ID']
df_cntnt_synset = pd.DataFrame({ 'Cntnt_Synset' : list_cntnt_synset })
df_ac_buf['Cntnt_Synset'] = df_cntnt_synset['Cntnt_Synset']
#Updated 1/16/2017 <EMAIL>
if df_synset_all.shape[0] > 0:
#Updated 3/4/2017 <EMAIL>
pd_ver = list(map(int, | pd.__version__.split('.') | pandas.__version__.split |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 16 09:15:54 2016
@author: <NAME>
"""
import pandas as pd
import numpy as np
###### Import packages needed for the make_vars functions
from scipy.interpolate import interp1d
import pywt
from skimage.filters.rank import entropy
from skimage.morphology import rectangle
from skimage.util import img_as_ubyte
def make_dwt_vars_cD(wells_df,logs,levels,wavelet):
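    """For each well and log, run a 4-level discrete wavelet transform and add the
    detail coefficients (cD) of the requested levels as new columns, interpolated
    back onto the depth grid with nearest-neighbour interpolation."""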
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
cA_4, cD_4, cD_3, cD_2, cD_1 = pywt.wavedec(temp_data,wave,level=4,mode='symmetric')
dict_cD_levels = {1:cD_1, 2:cD_2, 3:cD_3, 4:cD_4}
for i in levels:
new_depth = np.linspace(min(depth),max(depth),len(dict_cD_levels[i]))
fA = interp1d(new_depth,dict_cD_levels[i],kind='nearest')
temp_df[log + '_cD_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_dwt_vars_cA(wells_df,logs,levels,wavelet):
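    """For each well and log, run a discrete wavelet transform at each requested
    level and add the approximation coefficients (cA) as new columns, interpolated
    back onto the depth grid."""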
wave= pywt.Wavelet(wavelet)
grouped = wells_df.groupby(['Well Name'])
new_df = pd.DataFrame()
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for i in levels:
cA_cD = pywt.wavedec(temp_data,wave,level=i,mode='symmetric')
cA = cA_cD[0]
new_depth = np.linspace(min(depth),max(depth),len(cA))
fA = interp1d(new_depth,cA,kind='nearest')
temp_df[log + '_cA_level_' + str(i)] = fA(depth)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_entropy_vars(wells_df,logs,l_foots):
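    """For each well and log, compute a local entropy texture attribute by stacking
    the log into a 3-row image and applying a rank entropy filter with rectangular
    footprints of the given lengths."""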
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
image = np.vstack((temp_data,temp_data,temp_data))
image -= np.median(image)
image /= np.max(np.abs(image))
image = img_as_ubyte(image)
for l_foot in l_foots:
footprint = rectangle(l_foot,3)
temp_df[log + '_entropy_foot' + str(l_foot)] = entropy(image,footprint)[0,:]
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_gradient_vars(wells_df,logs,dx_list):
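    """For each well and log, add numerical first-derivative columns computed with
    each sample spacing in dx_list."""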
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
depth = grouped.get_group(key)['Depth']
temp_df = pd.DataFrame()
temp_df['Depth'] = depth
for log in logs:
temp_data = grouped.get_group(key)[log]
for dx in dx_list:
temp_df[log + 'gradient_dx' + str(dx)] = np.gradient(temp_data,dx)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_av_vars(wells_df,logs,windows):
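    """For each well and log, add centered moving-average columns for each window
    length in windows."""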
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_av_' + str(window) + 'ft'] = pd.rolling_mean(arg=temp_data, window=window, min_periods=1, center=True)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_std_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_std_' + str(window) + 'ft'] = pd.rolling_std(arg=temp_data, window=window, min_periods=1, center=True)
new_df = new_df.append(temp_df)
new_df = new_df.sort_index()
new_df = new_df.drop(['Depth'],axis=1)
return new_df
def make_moving_max_vars(wells_df,logs,windows):
new_df = pd.DataFrame()
grouped = wells_df.groupby(['Well Name'])
for key in grouped.groups.keys():
temp_df = pd.DataFrame()
temp_df['Depth'] = grouped.get_group(key)['Depth']
for log in logs:
temp_data = grouped.get_group(key)[log]
for window in windows:
temp_df[log + '_moving_max_' + str(window) + 'ft'] = | pd.rolling_max(arg=temp_data, window=window, min_periods=1, center=True) | pandas.rolling_max |
import os
from typing import List
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from data_domain import CategoricalDataDomain, RealDataDomain
from privacy_budget import PrivacyBudget
from private_table import PrivateTable
from utils import check_absolute_error
@pytest.fixture
def example_table():
"""creating a table from the iris dataset"""
iris_data = pd.read_csv(os.path.join("dataset", "iris_data.txt"),
names=["Sepal Length", "Sepal Width", "Petal Length",
"Petal Width", "Class"])
data = {'Sepal Length': iris_data["Sepal Length"].tolist(),
'Sepal Width': iris_data["Sepal Width"].tolist(),
'Petal Length': iris_data["Petal Length"].tolist(),
'Petal Width': iris_data["Petal Width"].tolist(),
'Class': iris_data["Class"].tolist()}
df = pd.DataFrame(data)
return df
@ pytest.fixture
def example_private_table():
iris_data = pd.read_csv(os.path.join("dataset", "iris_data.txt"),
names=["Sepal Length", "Sepal Width", "Petal Length",
"Petal Width", "Class"])
data = {'Sepal Length': iris_data["Sepal Length"].tolist(),
'Sepal Width': iris_data["Sepal Width"].tolist(),
'Petal Length': iris_data["Petal Length"].tolist(),
'Petal Width': iris_data["Petal Width"].tolist(),
'Class': iris_data["Class"].tolist()}
df = | pd.DataFrame(data) | pandas.DataFrame |
from __future__ import absolute_import, division, print_function
import datetime
import pandas as pd
from config import *
def _drop_in_time_slice(m2m, m2b, m5cb, time_slice, to_drop):
"""Drops certain members from data structures, only in a given time slice.
This can be useful for removing people who weren't there on a specific day, or non-participants.
"""
logger.debug("Removing data: {} {}".format(time_slice, to_drop))
m2m.drop(m2m.loc[(time_slice, slice(None), to_drop), :].index, inplace=True)
m2m.drop(m2m.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m2b.drop(m2b.loc[(time_slice, to_drop, slice(None)), :].index, inplace=True)
m5cb.drop(m5cb.loc[(time_slice, to_drop), :].index, inplace=True)
def _clean_m2m(where, participation_dates, battery_sundays):
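    """Keep only member-to-member proximity records that fall inside each member's
    participation window, drop battery-change periods, and append the cleaned rows
    to the clean HDF store."""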
logger.info('loading m2m')
m2m = pd.read_hdf(dirty_store_path, 'proximity/member_to_member', where=where)
logger.info("original m2m len: {}".format(len(m2m)))
if len(m2m) == 0:
return
logger.info('cleaning m2m')
m2m.reset_index(inplace=True)
# Mark all records as not to keep. This removes all non-participants
m2m['keep'] = False
# For m2m, we need to look on both sides. Therefore, for each participating member, we will
    # turn on a "keep" flag if the member is valid on either side of the connection. Then, we will only keep
# records in which both sides are valid
    logger.info('Keeping only relevant dates for each participant')
i = 0
total_count = len(participation_dates)
for item, p in participation_dates.iterrows():
i += 1
logger.debug("({}/{}) {},{},{}".format(i, total_count, p.member, p.start_date_ts, p.end_date_ts))
side1_cond = ((m2m.member1 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side1_cond, 'keep_1'] = True
side2_cond = ((m2m.member2 == p.member) & (m2m.datetime >= p.start_date_ts) & (m2m.datetime < p.end_date_ts))
m2m.loc[side2_cond, 'keep_2'] = True
m2m.loc[(m2m.keep_1 == True) & (m2m.keep_2 == True), 'keep'] = True
del m2m['keep_1']
del m2m['keep_2']
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
# Remove times of battery changes
logger.info('Removing times of battery changes')
i = 0
total_count = len(battery_sundays)
for item, s in battery_sundays.iterrows():
i += 1
logger.debug("({}/{}) {},{}".format(i, total_count, s.battery_period_start, s.battery_period_end))
cond = ((m2m.datetime >= s.battery_period_start) & (m2m.datetime <= s.battery_period_end))
m2m.loc[cond, 'keep'] = False
logger.info('So far, keeping {} rows'.format(len(m2m[m2m['keep'] == True])))
m2m = m2m[m2m.keep == True]
logger.info("after cleaning: {}".format(len(m2m)))
del m2m['keep']
m2m.set_index(['datetime','member1','member2'], inplace=True)
logger.info("appending cleaned m2m to {}".format(clean_store_path))
with | pd.HDFStore(clean_store_path) | pandas.HDFStore |
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier as DT
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.ensemble import GradientBoostingClassifier as GB
from sklearn.feature_selection import f_classif as ANOVA
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression as LR
np.random.seed(42)
features = ['ageOfDomain', 'hasHttps', 'urlLength', 'prefixSuffix', 'hasIP', 'hasAt', 'redirects', 'shortenUrl', 'domainRegLength', 'DNSrecord', 'webTraffixAlexa', 'multSubDomains']
df1 = pd.read_csv('phish-0_w.csv', index_col=0)
df2 = pd.read_csv('alexa-0_w.csv', index_col=0)
df3 = pd.read_csv('phish0_5000.csv', index_col=0)
df4 = pd.read_csv('alexa0_5000.csv', index_col=0)
df5 = pd.read_csv('phish-0_k.csv')
df6 = pd.read_csv('alexa-0_k.csv')
feat1 = list(df1.columns)
feat2 = list(df3.columns)
feat = feat1[1:] + feat2[2:]
feat = feat + ['input_count']
# print (feat)
df1 = df1[features]
df2 = df2[features]
# print('df2 columns', df2.columns)
df3 = df3.drop('url', 1)
df4 = df4.drop('url', 1)
df3 = df3.drop('distance', 1)
df4 = df4.drop('distance', 1)
# print(df5.columns)
df5 = np.array(df5['input_count']).reshape(-1,1)
df6 = np.array(df6['input_count']).reshape(-1,1)
df1 = df1.values
df2 = df2.values
df3 = df3.values
df4 = df4.values
# print('df1 and df2:',df1.shape, df2.shape)
# print('df3 and df4:',df3.shape, df4.shape)
# print('df6 and df5:',df6.shape, df5.shape)
df1 = np.concatenate((df1, df3, df5), axis = 1)
df2 = np.concatenate((df2, df4, df6), axis = 1)
# print('df1 and df2:', df1.shape, df2.shape)
num_files = 5000
num_urls = num_files
indices = np.random.permutation(min(df1.shape[0], df2.shape[0]))
df1 = df1[indices,:]
indices = np.random.permutation(df1.shape[0])
df2 = df2[indices,:]
df3 = df1
frac = int(0.5 * num_files)
# print(num_files, frac)
x_train = df2[:frac, :]
x_train = np.concatenate((x_train, df3[:frac, :]), axis = 0)
y_train = np.zeros(frac)
y_train = np.concatenate((y_train, np.ones(frac)), axis = 0)
x_test = df2[frac:, :]
x_test = np.concatenate((x_test, df3[frac:, :]), axis = 0)
y_test = np.zeros(num_urls-frac)
y_test = np.concatenate((y_test, np.ones(num_urls-frac)), axis = 0)
# print(x_train.shape, x_test.shape)
# print(x_train.shape, x_test.shape)
test_all = {}
train_all = {}
# -------------
# RANDOM FOREST
# -------------
print('\nRandom Forest')
clf2 = RF()
clf2.fit(x_train, y_train)
print('test')
preds = clf2.predict(x_test)
print(sum(preds==y_test)/len(preds))
test_all['Random Forest'] = sum(preds==y_test)/len(preds)
print('train')
preds = clf2.predict(x_train)
print(sum(preds==y_train)/len(preds))
train_all['Random Forest'] = sum(preds==y_train)/len(preds)
# -------------
# DECISION TREE
# -------------
print('\nDecision Tree')
dpth = []
acc = []
acc_train = []
criterion = 'entropy'
for max_depth in range(2,20):
clf = DT(max_depth = max_depth, criterion = criterion)
clf.fit(x_train, y_train)
preds = clf.predict(x_test)
dpth.append(max_depth)
acc.append(sum(preds==y_test)/len(preds))
preds = clf.predict(x_train)
acc_train.append(sum(preds==y_train)/len(preds))
arg = np.argmax(acc)
max_depth = dpth[arg]
clf = DT(max_depth = max_depth, criterion = criterion)
clf.fit(x_train, y_train)
print('test')
preds = clf.predict(x_test)
print(sum(preds==y_test)/len(preds))
test_all['Decision Tree'] = sum(preds==y_test)/len(preds)
print('train')
preds = clf.predict(x_train)
print(sum(preds==y_train)/len(preds))
train_all['Decision Tree'] = sum(preds==y_train)/len(preds)
# -------------------
# LOGISTIC REGRESSION
# -------------------
print('\nLogistic Regression')
clf = LR(tol=1e-8, C=250.0, max_iter=4e6)
clf.fit(x_train, y_train)
# print(x_train.shape)
print('test')
preds = clf.predict(x_test)
print(sum(preds==y_test)/len(preds))
test_all['Logistic Regression'] = sum(preds==y_test)/len(preds)
print('train')
preds = clf.predict(x_train)
print(sum(preds==y_train)/len(preds))
train_all['Logistic Regression'] = sum(preds==y_train)/len(preds)
fig = plt.figure(3)
plt.plot(dpth, acc, label = 'Test')
plt.plot(dpth, acc_train, label = 'Train')
plt.legend(loc='upper left')
plt.xticks(dpth, dpth)
plt.ylabel('Classification Accuracy')
plt.xlabel('Depth')
plt.tight_layout()
fig.savefig('num_tree_acc.png', dpi=200, format = 'png', transparent=True)
# print(test_all)
results = pd.DataFrame(test_all, index=[0])
print(results)
results.to_csv('accuracy.csv', index=False)
############################################################
df1 = pd.read_csv('phish-1_w.csv', index_col=0)
df2 = pd.read_csv('alexa-1_w.csv', index_col=0)
df3 = | pd.read_csv('phish1_2500.csv', index_col=0) | pandas.read_csv |
from download_gps_data import download_data
from extract_stations import extract_stations, output_extracted_stations
import pandas as pd
from plot_extracted_stations import plot_extracted_stations
## PARAMETERS
minLatitude = -90
maxLatitude = 90
minLongitude = -180
maxLongitude = 180
sttime = "2017-01-01" #starttime of data request
edtime = "2017-12-31" #endtime of data request
plot_stations = 1
##################################################
extracted_stations, ext_stns_df = extract_stations(minlat=minLatitude,maxlat=maxLatitude,minlon=minLongitude,maxlon=maxLongitude)
# output_extracted_stations(ext_stns_df)
# stn = extracted_stations[0]
downloaded_data = | pd.DataFrame(columns=['StnCode','Latitude','Longitude','Elev']) | pandas.DataFrame |
#!/usr/bin/env python3
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from xdsl.xdsl_opt_main import xDSLOptMain
from io import IOBase
from src.ibis_frontend import ibis_to_xdsl
from dialects.ibis_dialect import Ibis
class RelOptMain(xDSLOptMain):
def register_all_frontends(self):
super().register_all_frontends()
def parse_ibis(f: IOBase):
import ibis
import pandas as pd
connection = ibis.pandas.connect(
{"t": | pd.DataFrame({"a": ["AS", "EU", "NA"]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy import interpolate
import os, sys
def pseudo_wells_model(zmin, zmax, sr, no_wells, zones={}, zones_ss={}, depth='Depth', zone_idx='Zone_idx', zone_col='Zone'):
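    """Build no_wells pseudo wells on a regular depth grid between zmin and zmax.
    Zone tops from `zones` are mapped onto the grid, and within each zone that has a
    nonzero entry in `zones_ss` (the number of bodies to distribute) a binary flag
    pattern is spread so that the flagged fraction increases linearly from 0% in the
    first well to 100% in the last."""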
depth_log = np.arange(zmin, zmax, sr)
pseudo_wells = pd.DataFrame(np.zeros((len(depth_log), no_wells)))
pseudo_wells[depth] = depth_log
zones_df = pd.DataFrame()
zones_df[depth] = [float(i) for i in zones.values()]
zones_df[zone_col] = zones.keys()
pseudo_wells = pd.merge_asof(pseudo_wells, zones_df, on=depth)
zone_dict = dict(zip(pseudo_wells[zone_col].unique(), [int(i) for i in range(len(pseudo_wells[zone_col].unique()))]))
pseudo_wells[zone_idx] = pseudo_wells[zone_col].map(zone_dict)
for zone in zones_ss.keys():
if zones_ss[zone] != 0:
for well in range(no_wells):
ntg = 100* (well) / (no_wells - 1)
zone_list = pseudo_wells[pseudo_wells[zone_col] == zone][well].values
locs = []
for i in range(zones_ss[zone]):
if zones_ss[zone] > 1:
locs.append(int((len(zone_list)-1) * i/(zones_ss[zone]-1)))
else:
locs.append(0)
ones = 1
while (sum(zone_list)/len(zone_list)) < ntg/100:
zone_list = 0 * zone_list
disp = np.ones(ones)
if zones_ss[zone] == 1:
zone_list[0:ones] = disp
else:
for i in range(len(locs)):
if i == 0:
zone_list[0:ones] = disp
elif i == len(locs)-1:
zone_list[-ones:] = disp
break
else:
insert = int(locs[i]-(len(disp)/2))
zone_list[insert:insert+len(disp):1] = disp
ones += 1
ind = 0
for idx, row in pseudo_wells[pseudo_wells[zone_col] == zone].iterrows():
pseudo_wells.loc[row.name, well] = zone_list[ind]
ind += 1
return pseudo_wells
def dict_mapper(row, sand, shale, no_wells, zone_col):
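    """Replace the binary flags of one depth sample with property values: 0 maps to
    the sand value and 1 to the shale value of the sample's zone."""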
for i in range(no_wells):
if row[i] == 0:
row[i] = sand[row[zone_col]]
else:
row[i] = shale[row[zone_col]]
return row
def property_mapper(pseudo_wells, sand_density, shale_density, sand_vp, shale_vp, sand_vs, shale_vs, zone_col='Zone'):
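    """Map the binary pseudo-well flags to density, Vp and Vs values using the
    per-zone sand and shale property dictionaries, returning one DataFrame per
    property."""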
no_wells = len(pseudo_wells.columns) - 3
density = pseudo_wells.apply(dict_mapper, args=(sand_density, shale_density, no_wells, zone_col), axis=1)
vp = pseudo_wells.apply(dict_mapper, args=(sand_vp, shale_vp, no_wells, zone_col), axis=1)
vs = pseudo_wells.apply(dict_mapper, args=(sand_vs, shale_vs, no_wells, zone_col), axis=1)
return density, vp, vs
def time_model(pseudo_wells, density, vp, vs, wcs_file, skip=1, zones={}, time='Time', depth='Depth', zone_idx='Zone_idx', zone='Zone'):
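    """Convert the depth-sampled pseudo wells to two-way time using a checkshot
    (depth/time) file, resampling onto a regular time axis with a step of 2
    (assumed to be milliseconds)."""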
wcs = np.loadtxt(wcs_file, skiprows=skip)
idx1 = (np.abs(np.asarray(wcs[:,0]) - pseudo_wells[depth].min())).argmin()
idx2 = (np.abs(np.asarray(wcs[:,0]) - pseudo_wells[depth].max())).argmin()
time_frame = np.arange(np.around(wcs[idx1,1], decimals=0), np.around(wcs[idx2,1], decimals=0), 2)
depth_frame = time_frame * 0
for i in range(len(depth_frame)):
idx = (np.abs(np.asarray(wcs[:,1]) - time_frame[i])).argmin()
depth_frame[i] = np.around(wcs[idx,0], decimals=0)
df_sampled = pd.DataFrame()
df_sampled[depth] = depth_frame
df_sampled[time] = time_frame
dens_twt = pd.DataFrame()
vp_twt = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python3
import pandas as pd
import subprocess
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import glob
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Set up a bunch of settings to test, more than will be plotted to ensure that I can change things around to plot different values
dimensions = [(660, 120), (1000, 200), (2000, 400), (4000, 800), (8000, 1600)]
sim_times = [0.5, 0.2, 0.05, 0.01, 0.004]
omp_num_threads_tested = [1, 2, 3, 4, 5, 6]
sbatch_nodes_tested = [1, 2, 3, 4, 8]
# List of items to actually plot as different lines
omp_num_threads_plot = [1, 2, 4, 6]
sbatch_nodes_plot = [1, 2, 4, 8]
dimensions_plot = [(660, 120), (2000, 400), (8000, 1600)]
# Extract timing data from line
def get_time_from_timing_line(line):
string_time = line.split(" ")[3]
return float(string_time)
class CFDRunner:
"""
    Class to handle running a configuration via slurm and processing its output
"""
def __init__(self, id):
"""
Set the default parameters
Takes an id to keep config files separate
"""
self.x = 660
self.y = 120
self.t = 0.2
self.sbatch_nodes = 1
self.sbatch_tasks = 0.5
self.sbatch_time = "00:07:00"
self.omp_threads = 6
self.in_file = os.path.join("test", f"initial-{id}.bin")
self.out_file = os.path.join("test", f"completed-{id}.bin")
self.sbatch_file = os.path.join("test", f"submit-{id}.sbatch")
self.single_thread = False
def run(self):
"""
Run the slurm batch file and extract the slurm job id to read the file later
"""
process_output = subprocess.run(["sbatch", self.sbatch_file], stdout=subprocess.PIPE)
output_lines = process_output.stdout.decode().split("\n")
self.sbatch_id = output_lines[0].split(" ")[3]
def is_still_running(self):
"""
Check if the job still appears in the queue -> probably still running
"""
process_output = subprocess.run(["squeue"], stdout=subprocess.PIPE)
output_lines = process_output.stdout.decode().split("\n")
return any([self.sbatch_id in line for line in output_lines])
def parse_output(self):
"""
Parse the output into a dataframe of timing data
"""
with open(f"slurm-{self.sbatch_id}.out", "r") as fh:
lines = fh.readlines()
i = 0
# while i < len(lines) and "I am process" not in lines[i]:
# i += 1
# shape_output = lines[i]
timing_results = []
# Basically go line by line and extract the timing data
# If a timestep label is seen it knows a new set of measurements is starting
# Add the current of the data to the dataframe
# Note: Uses this weird method because it wasn't known which order the measurements would be output in
current_time = None
timestep_time_taken = None
compute_velocity_time_taken = None
rhs_time_taken = None
possion_time_taken = None
update_velocity_time_taken = None
boundary_time_taken = None
sync_time_taken = None
possion_p_loop_time_taken = None
possion_res_loop_time_taken = None
for line in lines[i:]:
try:
if "--- Timestep" in line:
if current_time is not None:
timing_results.append([
current_time,
timestep_time_taken,
compute_velocity_time_taken,
rhs_time_taken,
possion_time_taken,
update_velocity_time_taken,
boundary_time_taken,
sync_time_taken,
possion_p_loop_time_taken,
possion_res_loop_time_taken,
])
current_time = float(line.split(" ")[3])
elif "timestep_time_taken" in line:
timestep_time_taken = float(line.split(" ")[1])
elif "compute_velocity_time_taken" in line:
compute_velocity_time_taken = float(line.split(" ")[1])
elif "rhs_time_taken" in line:
rhs_time_taken = float(line.split(" ")[1])
elif "possion_time_taken" in line:
possion_time_taken = float(line.split(" ")[1])
elif "update_velocity_time_taken" in line:
update_velocity_time_taken = float(line.split(" ")[1])
elif "boundary_time_taken" in line:
boundary_time_taken = float(line.split(" ")[1])
elif "sync_time_taken" in line:
sync_time_taken = float(line.split(" ")[1])
elif "possion_p_loop_time_taken" in line:
possion_p_loop_time_taken = float(line.split(" ")[1])
elif "possion_res_loop_time_taken" in line:
possion_res_loop_time_taken = float(line.split(" ")[1])
except Exception as e:
print("Exception", e)
# Label the dataframe columns and return
df = pd.DataFrame(timing_results, columns=("Timestep", "timestep_time_taken", "compute_velocity_time_taken", "rhs_time_taken", "possion_time_taken", "update_velocity_time_taken", "boundary_time_taken", "sync_time_taken", "possion_p_loop_time_taken", "possion_res_loop_time_taken"))
return df
def save_sbatch(self):
"""
Export the configuration as a file to be run by sbatch
Bind to socket to avoid performing openmp across two sockets to avoid memory latency
"""
# Default to using the parallel implementation
command = f"time mpirun -n {self.sbatch_nodes} -npernode 1 --bind-to socket ./karman-par -x {self.x} -y {self.y} --infile {self.in_file} -o {self.out_file} -t {self.t}\n"
omp_line = f"export OMP_NUM_THREADS={self.omp_threads}\n"
# If singlethread use the other executable
if self.single_thread:
command = f"time ./karman -x {self.x} -y {self.y} --infile {self.in_file} -o {self.out_file} -t {self.t}\n"
omp_line = "\n"
# Write out the file
with open(self.sbatch_file, "w") as fh:
fh.writelines([
"#!/bin/bash\n",
"#SBATCH --job-name=cfd-graphs\n",
"#SBATCH --partition=cs402\n",
"#SBATCH --nice=9000\n",
"#SBATCH --ntasks-per-socket=1\n", # avoid going from socket to socket with openmp
f"#SBATCH --nodes={self.sbatch_nodes}\n",
f"#SBATCH --ntasks-per-node=1\n",
f"#SBATCH --cpus-per-task=12\n" # required for 6x scaling running on slurm scaled correctly with openmp up to 6 threads but after that failed to improve. I think it is only allocating one socket.
f"#SBATCH --time={self.sbatch_time}\n",
". /etc/profile.d/modules.sh\n",
"module purge\n",
"module load cs402-mpi\n",
omp_line,
command,
"#gprof ./karman\n",
"./bin2ppm < karman.bin > karman.ppm\n",
"./diffbin karman.vanilla.bin karman.bin\n",
])
def collect_data():
"""
Run all configurations
"""
all_df = pd.DataFrame({
"x": pd.Series(dtype='int32'),
"y": pd.Series(dtype='int32'),
"sbatch_nodes": | pd.Series(dtype='int32') | pandas.Series |
import os
import sys
import pickle
import numpy as np
import pandas as pd
import scipy.sparse as sp
from pathlib import Path
reaction_num = int(sys.argv[1])
with open('data/candidates_single.txt') as f:
candidates_smis = [s.rstrip() for s in f.readlines()]
n_candidates = len(candidates_smis)
candidates_smis = np.array(candidates_smis)
candidates_fps = sp.load_npz('data/candidates_fp_single.npz')
test = | pd.read_pickle('data/preprocessed_liu_dataset/test_sampled.pickle') | pandas.read_pickle |
import numpy as np
import pandas as pd
import us
import os
import gc
from datetime import timedelta
from numpy import linalg as la
from statsmodels.formula.api import ols
from cmdstanpy import CmdStanModel
import matplotlib.pyplot as plt
# os.chdir("/home/admin/gözdeproject/")
class ELECTION_2016:
def __init__(self):
"CONSTANTS"
lambda_ = 0.75
C_1 = np.ones([51, 51])
a = 1
self.polling_bias_scale = 0.013
self.random_walk_scale = 0.05 / np.sqrt(300)
self.sigma_measure_noise_national = 0.04
self.sigma_measure_noise_state = 0.04
self.sigma_c = 0.06
self.sigma_m = 0.04
self.sigma_pop = 0.04
self.sigma_e_bias = 0.02
self.run_date = pd.to_datetime("2016-11-08")
self.election_day = pd.to_datetime("2016-11-08")
self.start_date = pd.to_datetime("2016-03-01")
# day indices
self.df = self.get_df("data/all_polls.csv")
first_day = min(self.df["start"])
# getting states info from 2012
state2012 = pd.read_csv("data/2012.csv")
self.state_name = state2012["state_name"].values.tolist()
state2012["score"] = state2012["obama_count"] / (state2012["obama_count"] + state2012["romney_count"])
state2012["national score"] = sum(state2012["obama_count"]) / sum(
state2012["obama_count"] + state2012["romney_count"])
state2012["delta"] = state2012["score"] - state2012["national score"]
state2012["share_national_vote"] = (state2012["total_count"] * (1 + state2012["adult_pop_growth_2011_15"])) \
/ sum(state2012["total_count"] * (1 + state2012["adult_pop_growth_2011_15"]))
state2012 = state2012.sort_values("state")
self.state_abb = state2012["state"]
prior_diff_score = pd.DataFrame(state2012["delta"])
prior_diff_score.set_index(self.state_abb, inplace=True)
self.state_weights = pd.DataFrame(state2012["share_national_vote"] / sum(state2012["share_national_vote"]))
self.state_weights.set_index(self.state_abb.sort_values(), inplace=True)
##creating covariance matrices
# preparing data
state_data = pd.read_csv("data/abbr_list.csv")
state_data = state_data[["year", "state", "dem"]]
state_data = state_data[state_data["year"] == 2016]
state_data.rename(columns={"year": "variable", "dem": "value"}, inplace=True)
state_data = state_data[["state", "variable", "value"]]
census = pd.read_csv("data/acs_2013_variables.csv")
census.dropna(inplace=True)
census.drop(columns=["state_fips", "pop_total", "pop_density"], inplace=True)
census = census.melt(id_vars="state")
state_data = state_data.append(census)
# adding urbanicity
urbanicity = pd.read_csv("data/urbanicity_index.csv")
urbanicity.rename(columns={"average_log_pop_within_5_miles": "pop_density"}, inplace=True)
urbanicity = urbanicity[["state", "pop_density"]]
urbanicity = urbanicity.melt(id_vars="state")
state_data = state_data.append(urbanicity)
# adding white evangelical
white_pct = pd.read_csv("data/white_evangel_pct.csv")
white_pct = white_pct.melt(id_vars="state")
state_data = state_data.append(white_pct)
# spread the data
state_data_long = state_data.copy()
state_data_long["value"] = state_data_long.groupby("variable")["value"].transform(
lambda x: (x - x.min()) / (x.max() - x.min()))
state_data_long = state_data_long.pivot_table(index="variable", columns="state", values="value").reset_index(
"variable")
state_data_long.drop(columns=["variable"], inplace=True)
        # creating and computing correlation matrix
# formula : a*(lambda*C + (1-lambda)*C_1)
# where C is corr matrix with min 0
# C_1 is sq matrix with all numbers 1
# lambda = 0 -> 100% corr, lambda = 1 -> our corr matrix
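        # e.g. with lambda_ = 0.75 each entry becomes 0.75*C_ij + 0.25, pulling
        # every pairwise correlation part of the way toward 1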
C = state_data_long.corr()
# make the values of C min 0
C = C.clip(lower=0)
tmp_C = C.copy()
np.fill_diagonal(tmp_C.values, np.nan)
A = (lambda_ * C + (1 - lambda_) * C_1)
new_C = self.nearestPD(A)
# making positive definite
state_correlation_polling = new_C
state_correlation_polling = self.nearestPD(state_correlation_polling)
# cov matrix for polling error
self.state_covariance_polling_bias = self.cov_matrix(51, 0.078 ** 2, 0.9)
self.state_covariance_polling_bias = self.state_covariance_polling_bias * state_correlation_polling
np.sqrt(self.state_weights.T @ self.state_covariance_polling_bias @ self.state_weights) / 4
# cov matrix for prior election day prediction
self.state_covariance_mu_b_T = self.cov_matrix(51, 0.18 ** 2, 0.9)
self.state_covariance_mu_b_T = self.state_covariance_mu_b_T * state_correlation_polling
np.sqrt(self.state_weights.T @ self.state_covariance_mu_b_T @ self.state_weights) / 4
# cov matrix for random walks
state_covariance_mu_b_walk = self.cov_matrix(51, 0.017 ** 2, 0.9)
# demo corrs to fill gaps in state polls
state_covariance_mu_b_walk = state_covariance_mu_b_walk * state_correlation_polling
(np.sqrt(self.state_weights.T @ state_covariance_mu_b_walk @ self.state_weights) / 4) * np.sqrt(300)
# Making default cov matrices:
# initial cov matrix
self.state_covariance_0 = self.cov_matrix(51, 0.07 ** 2, 0.9)
self.state_covariance_0 = self.state_covariance_0 * state_correlation_polling
np.sqrt(self.state_weights.T @ self.state_covariance_0 @ self.state_weights) / 4
diffdays_until_election = (self.election_day - self.run_date).days
expected_national_mu_b_T_error = self.fit_rmse_day_x(diffdays_until_election) # 0.03
self.mu_b_T_scale = expected_national_mu_b_T_error
# national_cov_matrix_error_sd = np.sqrt(self.state_weights.T @ self.state_covariance_0 @ self.state_weights) # 0.05
# cov_poll_bias = self.state_covariance_0 * ((self.polling_bias_scale / national_cov_matrix_error_sd * 4) ** 2).values[0][0]
# cov_mu_b_T = self.state_covariance_0 * ((self.mu_b_T_scale / national_cov_matrix_error_sd * 4) ** 2).values[0][0]
# cov_mu_b_walk = self.state_covariance_0 * ((self.random_walk_scale / national_cov_matrix_error_sd * 4) ** 2).values[0][0]
# creating priors:
abramowitz = | pd.read_csv("data/abramowitz_data.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 13:59:31 2020
@author: bernifoellmer
"""
import sys, os
import pandas as pd
import openpyxl
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import colors
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
from glob import glob
from shutil import copyfile
from cta import update_table
#from discharge_extract import extract_specific_tags_df
from discharge_ncs import discharge_ncs
import numpy as np
from collections import defaultdict
from ActiveLearner import ActiveLearner, DISCHARGEFilter
#from featureSelection import featureSelection
from openpyxl.utils import get_column_letter
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
from CTDataStruct import CTPatient
import keyboard
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from numpy.random import shuffle
from openpyxl.styles.differential import DifferentialStyle
from openpyxl import Workbook
from openpyxl.styles import Color, PatternFill, Font, Border
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting.rule import ColorScaleRule, CellIsRule, FormulaRule
from openpyxl.formatting import Rule
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def computeCTA(settings):
print('computeCTA')
#folderpath_master = 'H:/cloud/cloud_data/Projects/CACSFilter/data/discharge_master/discharge_master_01042020'
#date = folderpath_master.split('_')[-1]
#folderpath_components = os.path.join(folderpath_master, 'discharge_components_' + date)
#folderpath_sources = os.path.join(folderpath_master, 'discharge_sources_' + date)
#filepath_master = os.path.join(folderpath_master, 'discharge_master_' + date + '.xlsx')
#filepath_data = os.path.join(folderpath_components, 'discharge_data_' + date + '.xlsx')
filepath_dicom = settings['filepath_dicom']
filepath_master = settings['filepath_data']
filepath_ITT = settings['filepath_ITT']
filepath_phase_exclude_stenosis = settings['filepath_phase_exclude_stenosis']
filepath_stenosis_bigger_20_phases = settings['filepath_stenosis_bigger_20_phases']
filepath_prct = settings['filepath_prct']
filepath_ecrf = settings['filepath_ecrf']
    #filepath_master = 'H:/cloud/cloud_data/Projects/CACSFilter/data/discharge_master/discharge_master_01042020/discharge_master_01042020.xlsx'
df_discharge = pd.read_excel(filepath_dicom)
df_master = pd.read_pickle(filepath_master)
df_ITT = pd.read_excel(filepath_ITT)
df_phase_exclude_stenosis = | pd.read_excel(filepath_phase_exclude_stenosis) | pandas.read_excel |
import inspect
import json
import logging
import random
import re
import sys
from collections import defaultdict
from contextlib import redirect_stdout
from datetime import datetime, timedelta
from io import StringIO
from itertools import product
from os import getenv
from os.path import dirname, realpath
from pathlib import Path
from string import Template
import click
import discord
import dunamai as _dunamai
import hupper
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
from dateutil.relativedelta import relativedelta
from humanize import naturaltime
from turnips.archipelago import Archipelago
from turnips.plots import plot_models_range
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError: # pragma: no cover
from yaml import Loader
__version__ = _dunamai.get_version(
"turbot", third_choice=_dunamai.Version.from_any_vcs
).serialize()
matplotlib.use("Agg")
PACKAGE_ROOT = Path(dirname(realpath(__file__)))
RUNTIME_ROOT = Path(".")
# application configuration files
DEFAULT_CONFIG_TOKEN = RUNTIME_ROOT / "token.txt"
DEFAULT_CONFIG_CHANNELS = RUNTIME_ROOT / "channels.txt"
# static application asset data
DATA_DIR = PACKAGE_ROOT / "data"
STRINGS_DATA_FILE = DATA_DIR / "strings.yaml"
FOSSILS_DATA_FILE = DATA_DIR / "fossils.txt"
FISH_DATA_FILE = DATA_DIR / "fish.csv"
BUGS_DATA_FILE = DATA_DIR / "bugs.csv"
ART_DATA_FILE = DATA_DIR / "art.csv"
# persisted user and application data
DB_DIR = RUNTIME_ROOT / "db"
DEFAULT_DB_FOSSILS = DB_DIR / "fossils.csv"
DEFAULT_DB_PRICES = DB_DIR / "prices.csv"
DEFAULT_DB_ART = DB_DIR / "art.csv"
DEFAULT_DB_USERS = DB_DIR / "users.csv"
DEFAULT_DB_FISH = DB_DIR / "fish.csv"
# temporary application files
TMP_DIR = RUNTIME_ROOT / "tmp"
GRAPHCMD_FILE = TMP_DIR / "graphcmd.png"
LASTWEEKCMD_FILE = TMP_DIR / "lastweek.png"
with open(STRINGS_DATA_FILE) as f:
STRINGS = load(f, Loader=Loader)
FISH = pd.read_csv(FISH_DATA_FILE)
BUGS = pd.read_csv(BUGS_DATA_FILE)
ART = pd.read_csv(ART_DATA_FILE)
with open(FOSSILS_DATA_FILE) as f:
FOSSILS_SET = frozenset([line.strip().lower() for line in f.readlines()])
FISH_SET = frozenset(FISH.drop_duplicates(subset="name").name.tolist())
BUGS_SET = frozenset(BUGS.drop_duplicates(subset="name").name.tolist())
ART_SET = frozenset(ART.drop_duplicates(subset="name").name.tolist())
COLLECTABLE_SET = FOSSILS_SET | FISH_SET | BUGS_SET | ART_SET
EMBED_LIMIT = 5 # more embeds in a row than this causes issues
USER_PREFRENCES = [
"hemisphere",
"timezone",
"island",
"friend",
"fruit",
"nickname",
"creator",
]
# Based on values from datetime.isoweekday()
DAYS = {
"monday": 1,
"tuesday": 2,
"wednesday": 3,
"thursday": 4,
"friday": 5,
"saturday": 6,
"sunday": 7,
}
IDAYS = dict(map(reversed, DAYS.items()))
class Validate:
FRUITS = ["apple", "cherry", "orange", "peach", "pear"]
HEMISPHERES = ["northern", "southern"]
@classmethod
def friend(cls, value):
code = re.sub("[^0-9]", "", value)
return code if len(code) == 12 and code.isdigit() else None
@classmethod
def creator(cls, value):
code = re.sub("[^0-9]", "", value)
return code if len(code) == 12 and code.isdigit() else None
@classmethod
def fruit(cls, value):
fruit = value.lower()
return fruit if fruit in cls.FRUITS else None
@classmethod
def hemisphere(cls, value):
home = value.lower()
return home if home in cls.HEMISPHERES else None
@classmethod
def nickname(cls, value):
return value
@classmethod
def timezone(cls, value):
return value if value in pytz.all_timezones_set else None
@classmethod
def island(cls, value):
return value
def s(key, **kwargs):
    """Returns a string from data/strings.yaml with substitutions."""
data = STRINGS.get(key, "")
assert data, f"error: missing strings key: {key}"
return Template(data).substitute(kwargs)
def h(dt):
    """Converts a datetime to something readable by a human."""
if hasattr(dt, "tz_convert"): # pandas-datetime-like objects
dt = dt.to_pydatetime()
naive_dt = dt.replace(tzinfo=None)
return naturaltime(naive_dt)
def day_and_time(dt):
"""Converts a datetime to a day and time of day, eg: Monday pm."""
day = IDAYS[dt.isoweekday()]
am_pm = "am" if dt.hour < 12 else "pm"
return f"{day.title()} {am_pm}"
def humanize_months(row):
"""Generator that humanizes months from row data where each month is a column."""
ABBR = {
0: "Jan",
1: "Feb",
2: "Mar",
3: "Apr",
4: "May",
5: "Jun",
6: "Jul",
7: "Aug",
8: "Sep",
9: "Oct",
10: "Nov",
11: "Dec",
}
months = [
row["jan"],
row["feb"],
row["mar"],
row["apr"],
row["may"],
row["jun"],
row["jul"],
row["aug"],
row["sep"],
row["oct"],
row["nov"],
row["dec"],
]
start = None
for m, inc in enumerate(months):
if inc and start is None:
start = m # start of a range
elif not inc and start is None:
continue # range hasn't started yet
elif inc and start is not None:
continue # continuance of a range
else:
lhs = ABBR[start]
rhs = ABBR[m - 1]
if lhs != rhs:
yield f"{lhs} - {rhs}" # previous element ended a range
else:
yield f"{lhs}" # captures a lone element
start = None
if start == 0:
yield "the entire year" # capture total range
elif start is not None:
lhs = ABBR[start]
rhs = ABBR[11]
if lhs != rhs:
yield f"{lhs} - {rhs}" # capture a trailing range
else:
yield f"{lhs}" # captures a trailing lone element
def discord_user_from_name(channel, name):
"""Returns the discord user from the given channel and name."""
if name is None:
return None
lname = name.lower()
members = channel.members
return next(filter(lambda member: lname in str(member).lower(), members), None)
def discord_user_from_id(channel, user_id):
"""Returns the discord user from the given channel and user id."""
if user_id is None:
return None
iid = int(user_id)
members = channel.members
return next(filter(lambda member: iid == member.id, members), None)
def discord_user_name(channel, name_or_id):
"""Returns the discord user name from the given channel and name or id."""
if not name_or_id:
return None
user = (
discord_user_from_id(channel, name_or_id)
if isinstance(name_or_id, int) or name_or_id.isdigit()
else discord_user_from_name(channel, name_or_id)
)
return str(user) if user else None
def discord_user_id(channel, name):
"""Returns the discord user id name from the given channel and name."""
if not name:
return None
return getattr(discord_user_from_name(channel, name), "id", None)
def is_turbot_admin(channel, user_or_member):
"""Checks to see if given user or member has the Turbot Admin role on this server."""
member = (
user_or_member
if hasattr(user_or_member, "roles") # members have a roles property
else channel.guild.get_member(user_or_member.id) # but users don't
)
return any(role.name == "Turbot Admin" for role in member.roles) if member else False
def command(f):
f.is_command = True
return f
class Turbot(discord.Client):
"""Discord turnip bot"""
def __init__(
self,
token="",
channels=[],
prices_file=DEFAULT_DB_PRICES,
art_file=DEFAULT_DB_ART,
fish_file=DEFAULT_DB_FISH,
fossils_file=DEFAULT_DB_FOSSILS,
users_file=DEFAULT_DB_USERS,
log_level=None,
):
if log_level: # pragma: no cover
logging.basicConfig(level=log_level)
super().__init__()
self.token = token
self.channels = channels
self.prices_file = prices_file
self.art_file = art_file
self.fish_file = fish_file
self.fossils_file = fossils_file
self.users_file = users_file
self.base_prophet_url = "https://turnipprophet.io/?prices=" # TODO: configurable?
self._prices_data = None # do not use directly, load it from load_prices()
self._art_data = None # do not use directly, load it from load_art()
self._fish_data = None # do not use directly, load it from load_fish()
self._fossils_data = None # do not use directly, load it from load_fossils()
self._users_data = None # do not use directly, load it from load_users()
self._last_backup_filename = None
# build a list of commands supported by this bot by fetching @command methods
members = inspect.getmembers(self, predicate=inspect.ismethod)
self._commands = [
member[0]
for member in members
if hasattr(member[1], "is_command") and member[1].is_command
]
def run(self): # pragma: no cover
super().run(self.token)
def save_prices(self, data):
"""Saves the given prices data to csv file."""
data.to_csv(self.prices_file, index=False) # persist to disk
self._prices_data = data # in-memory optimization
def last_backup_filename(self):
"""Return the name of the last known backup file for prices or None if unknown."""
return self._last_backup_filename
def backup_prices(self, data):
"""Backs up the prices data to a datetime stamped file."""
filename = datetime.now(pytz.utc).strftime(
"prices-%Y-%m-%d.csv" # TODO: configurable?
)
filepath = Path(self.prices_file).parent / filename
self._last_backup_filename = filepath
data.to_csv(filepath, index=False)
def load_prices(self):
"""Loads up and returns the application price data as a DataFrame."""
if self._prices_data is not None:
return self._prices_data
cols = ["author", "kind", "price", "timestamp"]
dtypes = ["int64", "object", "int64", "datetime64[ns, UTC]"]
if Path(self.prices_file).exists():
self._prices_data = pd.read_csv(
self.prices_file, names=cols, parse_dates=True, skiprows=1
)
else:
self._prices_data = pd.read_csv(
StringIO(""), names=cols, dtype=dict(zip(cols, dtypes))
)
self._prices_data = self._prices_data.astype(dict(zip(cols, dtypes)))
return self._prices_data
def save_users(self, data):
"""Saves the given users data to csv file."""
data.to_csv(self.users_file, index=False) # persist to disk
self._users_data = data # in-memory optimization
def load_users(self):
"""Returns a DataFrame of user data or creates an empty one."""
if self._users_data is not None:
self._users_data = self._users_data.fillna("")
return self._users_data
cols = ["author", *USER_PREFRENCES]
dtypes = ["int64", "str", "str", "str", "str", "str", "str", "str"]
if Path(self.users_file).exists():
self._users_data = pd.read_csv(self.users_file, names=cols, skiprows=1)
else:
self._users_data = pd.read_csv(
StringIO(""), names=cols, dtype=dict(zip(cols, dtypes))
)
self._users_data = self._users_data.fillna("")
self._users_data = self._users_data.astype(dict(zip(cols, dtypes)))
return self._users_data
def save_art(self, data):
"""Saves the given art data to csv file."""
data.to_csv(self.art_file, index=False) # persist to disk
self._art_data = data # in-memory optimization
def load_art(self):
"""Returns a DataFrame of art data or creates an empty one."""
if self._art_data is None:
try:
self._art_data = pd.read_csv(self.art_file)
except FileNotFoundError:
self._art_data = | pd.DataFrame(columns=["author", "name"]) | pandas.DataFrame |
import csv
import httplib2
from apiclient.discovery import build
import urllib
import json
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.tools import FigureFactory as FF
plotly.tools.set_credentials_file(username = 'ediering', api_key='k23nwbsle7')
# This API key is provided by google as described in the tutorial
API_KEY = '<KEY>'
# This is the table id for the fusion table
TABLE_ID = '15CnIT8u1snCOSRYjV3lPrEnUR_5qoGZ1ZhwGytAt'
try:
fp = open("data.json")
response = json.load(fp)
except IOError:
service = build('fusiontables', 'v1', developerKey=API_KEY)
query = "SELECT * FROM " + TABLE_ID #+ " WHERE 'Total Energy Cost ($)' > 0 AND 'Total Energy Cost ($)' < 1000000 "
response = service.query().sql(sql=query).execute()
fp = open("data.json", "w+")
json.dump(response, fp)
# print len(response['rows'])
data_df = pd.DataFrame(response[u'rows'], columns = response[u'columns'])
working = data_df[['Site', 'Site ID', 'Year', 'Total Energy (kBtu)', 'Total Energy Cost ($)']]
pivot_cost = working.pivot(index='Site ID', columns='Year', values='Total Energy Cost ($)')
pivot_energy = working.pivot(index='Site ID', columns='Year', values='Total Energy (kBtu)')
def totalcostplot_energy():
pivot_cost = working.pivot(index='Site ID', columns='Year', values='Total Energy Cost ($)')
pivot_energy = working.pivot(index='Site ID', columns='Year', values='Total Energy (kBtu)')
rows = pivot_cost.index
plot = []
    for i in range(len(rows)):
index = rows[i]
trace = go.Scatter(
x = pivot_cost.columns.values,
y = pivot_cost.loc[index],
#mode = 'markers'
)
plot.append(trace)
layout = go.Layout(
xaxis=dict(
autotick=False),
showlegend=False)
fig= go.Figure(data=plot, layout=layout)
return fig
def boxplot():
ten = pd.to_numeric(pivot_cost['2010']).dropna()
eleven = pd.to_numeric(pivot_cost['2011']).dropna()
twelve = pd.to_numeric(pivot_cost['2012']).dropna()
thirteen = pd.to_numeric(pivot_cost['2013']).dropna()
fourteen = pd.to_numeric(pivot_cost['2014']).dropna()
trace0 = go.Box(
y= ten,
name = '2010'
)
trace1 = go.Box(
y= eleven,
name = '2011'
)
trace2 = go.Box(
y= twelve,
name = '2012'
)
trace3 = go.Box(
y= thirteen,
name = '2013'
)
trace4 = go.Box(
y= fourteen,
name = '2014'
)
data = [trace0, trace1, trace2, trace3, trace4]
layout = go.Layout(
yaxis=dict(
range=[0, 40000]
)
)
return [data, layout]
def histogram():
ten = pd.to_numeric(pivot_cost['2010']).dropna()
eleven = pd.to_numeric(pivot_cost['2011']).dropna()
twelve = pd.to_numeric(pivot_cost['2012']).dropna()
thirteen = pd.to_numeric(pivot_cost['2013']).dropna()
fourteen = pd.to_numeric(pivot_cost['2014']).dropna()
    ax = sns.distplot(fourteen)
    sns.distplot(thirteen)
    sns.distplot(twelve)
    sns.distplot(eleven)
    sns.distplot(ten)
    fig = ax.get_figure()
fig.savefig("overlay.png")
def sum_data():
cost = working['Total Energy Cost ($)']
print(cost[1])
def average_data():
data_df = pd.DataFrame(response[u'rows'], columns = response[u'columns'])
working = data_df[['Site', 'Site ID', 'Year', 'Total Energy (kBtu)', 'Total Energy Cost ($)']]
pivot_cost = working.pivot(index='Site ID', columns='Year', values='Total Energy Cost ($)')
ten = pd.to_numeric(pivot_cost['2010']).dropna()
eleven = | pd.to_numeric(pivot_cost['2011']) | pandas.to_numeric |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
# This Metadata object is compared against observed Metadata objects in
# many of the tests, so just define it once here.
self.simple_md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Basic sanity check to make sure the columns are ordered and typed as
# expected. It'd be unfortunate to compare observed results to expected
# results that aren't representing what we think they are!
obs_columns = [(name, props.type)
for name, props in self.simple_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
# Simple metadata file without comments, empty rows, jaggedness,
# missing data, odd IDs or column names, directives, etc. The file has
# multiple column types (numeric, categorical, and something that has
# mixed numbers and strings, which must be interpreted as categorical).
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_bom_simple_txt(self):
# This is the encoding that notepad.exe will use most commonly
fp = get_data_path('valid/BOM-simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_different_file_extension(self):
fp = get_data_path('valid/simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_newline_at_eof(self):
fp = get_data_path('valid/no-newline-at-eof.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_unix_line_endings(self):
fp = get_data_path('valid/unix-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_windows_line_endings(self):
fp = get_data_path('valid/windows-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_mac_line_endings(self):
fp = get_data_path('valid/mac-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_source_artifacts(self):
fp = get_data_path('valid/simple.tsv')
metadata = Metadata.load(fp)
self.assertEqual(metadata.artifacts, ())
def test_retains_column_order(self):
# Explicitly test that the file's column order is retained in the
# Metadata object. Many of the test cases use files with column names
# in alphabetical order (e.g. "col1", "col2", "col3"), which matches
# how pandas orders columns in a DataFrame when supplied with a dict
# (many of the test cases use this feature of the DataFrame
# constructor when constructing the expected DataFrame).
fp = get_data_path('valid/column-order.tsv')
obs_md = Metadata.load(fp)
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_columns = ['z', 'y', 'x']
exp_data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_leading_trailing_whitespace(self):
fp = get_data_path('valid/leading-trailing-whitespace.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_comments(self):
fp = get_data_path('valid/comments.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_empty_rows(self):
fp = get_data_path('valid/empty-rows.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_qiime1_mapping_file(self):
fp = get_data_path('valid/qiime1.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_sample_information_file(self):
fp = get_data_path('valid/qiita-sample-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'DESCRIPTION': ['description 1', 'description 2'],
'TITLE': ['A Title', 'Another Title']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_preparation_information_file(self):
fp = get_data_path('valid/qiita-preparation-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'BARCODE': ['ACGT', 'TGCA'],
'EXPERIMENT_DESIGN_DESCRIPTION': ['longitudinal study',
'longitudinal study']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_biom_observation_metadata_file(self):
fp = get_data_path('valid/biom-observation-metadata.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['OTU_1', 'OTU_2'], name='#OTUID')
exp_df = pd.DataFrame([['k__Bacteria;p__Firmicutes', 0.890],
['k__Bacteria', 0.9999]],
columns=['taxonomy', 'confidence'],
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
fp = os.path.join(self.temp_dir, 'metadata.tsv')
count = 0
for header in headers:
with open(fp, 'w') as fh:
fh.write('%s\tcolumn\nid1\tfoo\nid2\tbar\n' % header)
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2'], name=header)
exp_df = pd.DataFrame({'column': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
fp = get_data_path('valid/recommended-ids.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
exp_df = pd.DataFrame({'col1': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_non_standard_characters(self):
# Test that non-standard characters in IDs, column names, and cells are
# handled correctly. The test case isn't exhaustive (e.g. it doesn't
# test every Unicode character; that would be a nice additional test
# case to have in the future). Instead, this test aims to be more of an
# integration test for the robustness of the reader to non-standard
# data. Many of the characters and their placement within the data file
# are based on use-cases/bugs reported on the forum, Slack, etc. The
# data file has comments explaining these test case choices in more
# detail.
fp = get_data_path('valid/non-standard-characters.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
exp_columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"',
'col\t \r\n5']
exp_data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_missing_data(self):
fp = get_data_path('valid/missing-data.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['None', 'nan', 'NA'], name='id')
exp_df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', 'NA']),
('col4', np.array([np.nan, np.nan, np.nan], dtype=object))]),
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
# Test that column types are correct (mainly for the two empty columns;
# one should be numeric, the other categorical).
obs_columns = [(name, props.type)
for name, props in obs_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('NA', 'numeric'),
('col3', 'categorical'), ('col4', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def test_minimal_file(self):
# Simplest possible metadata file consists of one ID and zero columns.
fp = get_data_path('valid/minimal.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_id(self):
fp = get_data_path('valid/single-id.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1'], name='id')
exp_df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_no_columns(self):
fp = get_data_path('valid/no-columns.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a', 'b', 'my-id'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_column(self):
fp = get_data_path('valid/single-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0]}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_trailing_columns(self):
fp = get_data_path('valid/trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_jagged_trailing_columns(self):
# Test case based on https://github.com/qiime2/qiime2/issues/335
fp = get_data_path('valid/jagged-trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_padding_rows_shorter_than_header(self):
fp = get_data_path('valid/rows-shorter-than-header.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, np.nan],
'col2': ['a', np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_all_cells_padded(self):
fp = get_data_path('valid/all-cells-padded.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
'col2': [np.nan, np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_does_not_cast_ids_or_column_names(self):
fp = get_data_path('valid/no-id-or-column-name-type-cast.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['0.000001', '0.004000', '0.000000'],
dtype=object, name='id')
exp_columns = ['42.0', '1000', '-4.2']
exp_data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': [0.0, 2.0, 0.0003, -4.2, 1e-4, 1e4,
1.5e2, np.nan, 1.0, 0.5, 1e-8, -0.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column_as_categorical(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': ['0', '2.0', '0.00030', '-4.2', '1e-4',
'1e4', '+1.5E+2', np.nan, '1.', '.5',
'1e-08', '-0']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_complete_types_directive(self):
fp = get_data_path('valid/complete-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_partial_types_directive(self):
fp = get_data_path('valid/partial-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_empty_types_directive(self):
fp = get_data_path('valid/empty-types-directive.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_with_case_insensitive_types_directive(self):
fp = get_data_path('valid/case-insensitive-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': [-5.0, 0.0, 42.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_without_directive(self):
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_override_directive(self):
fp = get_data_path('valid/simple-with-directive.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical',
'col2': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
class TestSave(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
self.filepath = os.path.join(self.temp_dir, 'metadata.tsv')
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'r') as fh:
obs = fh.read()
exp = (
"id\tcol1\tcol2\tcol3\n"
"#q2:types\tnumeric\tcategorical\tcategorical\n"
"id1\t1\ta\tfoo\n"
"id2\t2\tb\tbar\n"
"id3\t3\tc\t42\n"
)
self.assertEqual(obs, exp)
def test_save_metadata_auto_extension(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Filename & extension endswith is matching (non-default).
fp = os.path.join(self.temp_dir, 'metadatatsv')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadatatsv.tsv')
# No period in filename; no extension included.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp)
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata')
# No period in filename; no period in extension.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp, 'tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# No period in filename; multiple periods in extension.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp, '..tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Single period in filename; no period in extension.
fp = os.path.join(self.temp_dir, 'metadata.')
obs_md = md.save(fp, 'tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Single period in filename; single period in extension.
fp = os.path.join(self.temp_dir, 'metadata.')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Single period in filename; multiple periods in extension.
fp = os.path.join(self.temp_dir, 'metadata.')
obs_md = md.save(fp, '..tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Multiple periods in filename; single period in extension.
fp = os.path.join(self.temp_dir, 'metadata..')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Multiple periods in filename; multiple periods in extension.
fp = os.path.join(self.temp_dir, 'metadata..')
obs_md = md.save(fp, '..tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# No extension in filename; no extension input.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp)
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata')
# No extension in filename; extension input.
fp = os.path.join(self.temp_dir, 'metadata')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Extension in filename; no extension input.
fp = os.path.join(self.temp_dir, 'metadata.tsv')
obs_md = md.save(fp)
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
# Extension in filename; extension input (non-matching).
fp = os.path.join(self.temp_dir, 'metadata.tsv')
obs_md = md.save(fp, '.txt')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv.txt')
# Extension in filename; extension input (matching).
fp = os.path.join(self.temp_dir, 'metadata.tsv')
obs_md = md.save(fp, '.tsv')
obs_filename = os.path.basename(obs_md)
self.assertEqual(obs_filename, 'metadata.tsv')
def test_no_bom(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md.save(self.filepath)
with open(self.filepath, 'rb') as fh:
obs = fh.read(2)
self.assertEqual(obs, b'id')
def test_different_file_extension(self):
md = Metadata(pd.DataFrame(
{'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index= | pd.Index(['id1', 'id2', 'id3'], name='id') | pandas.Index |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.my_normalize_data import (
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
normalize_columns
)
class XTestNormalizeColumns:
def test_replace_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A"}
data = {"aa": [1]}
df = pd.DataFrame(data)
data = {"A": [1]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_replace_multiple_column_name_with_value_from_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_not_in_columns_mapping(self):
columns_mapping = {"aa": "A", "b b": "B"}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
data = {"A": [1], "B": [2], "cc": [3]}
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
def test_does_not_affect_columns_if_columns_mapping_has_no_value(self):
columns_mapping = {"aa": None, "bb": "", "cc": np.nan}
data = {"aa": [1], "b b": [2], "cc": [3]}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
normalize_columns(df, columns_mapping)
assert_frame_equal(df, expected)
class XTestRemoveBracketText:
def test_removes_text_within_brackets_at_end_of_cell(self):
df = pd.DataFrame(['aa [A]', 'bb [BB]', 'cc [C] ', 'dd [dd] '])
expected = pd.DataFrame(['aa', 'bb', 'cc', 'dd'])
remove_bracket_text(df)
assert_frame_equal(df, expected)
def test_does_not_remove_text_within_brackets_at_start_of_cell(self):
df = pd.DataFrame(['[A] aa', '[BB] bb', '[C] cc ', ' [dd] dd '])
expected = df.copy()
remove_bracket_text(df)
| assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
from __future__ import division
from matplotlib import pyplot as plt
import matplotlib.colors as colors
from matplotlib.pylab import *
from heapq import heappush, heappop
from itertools import count
import os
import pandas as pd
import numpy as np
import networkx as nx
import geopandas as gp
import ema_workbench
from od_prep import od_aggregation
__all__ = ['aon_assignment',
'probit_assignment',
'edge_betweenness_centrality',
'edge_betweenness_subset_od',
'betweenness_to_df',
'edge_betweenness_subset_od_ema',
'ema_betweenness',
'k_shortest_paths',
'ksp_edge_betweenness_subset_od',
'sp_dict_graph_creation',
'interdiction_single_edge']
def aon_assignment(G, sources, targets, weight, od):
'''
Function to do All-or-Nothing assignment on transport network
Parameters
------------
G: Graph
Transport network Graph Networkx object that will be analyzed
sources: list
List of nodes (integer) that will be used as sources. The integer should correspond to
node id in G Graph
targets: list
List of nodes (integer) that will be used as targets. The integer should correspond to
node id in G Graph
weight: str
String which corresponds to attribute of G Graph's edges that will be used as penalty for each
edge. In most cases this is defined as 'length' of the edge.
od: DataFrame
OD matrix dataframe
Returns
------------
d: dict
Dictionary with edge tuple as keys (e.g. (2,3) ) and flow value as values
'''
#create empty dict
d={}
#iterate over all sources
for i in range(len(sources)):
source = sources[i]
#iterate over all edges
for j in range(len(targets)):
target = targets[j]
#it is assumed that there is no self-loop on the node
#e.g. there is no flow from node A to node A
if source != target :
#determine shortest path between the OD pair
sp_dijk_all = nx.dijkstra_path(G, source=source, target=target, weight=weight)
#update the betweenness value of all edges in the shortest path
flow = od[source][target]
for j in range(len(sp_dijk_all)-1):
lst = [sp_dijk_all[j],sp_dijk_all[j+1]]
lst = [min(lst), max(lst)]
tup = tuple(lst)
if tup in d.keys():
d[tup]+=1*flow
else:
d.update({tup:1*flow})
#assign 0 to all edges which don't belong to any shortest path
#at the same time, record all the correct order of edges name
edges_list = []
for u,v in G.edges():
elst = [u,v]
elst = [min(elst), max(elst)]
etup = tuple(elst)
if not etup in d.keys():
d.update({etup:0})
tup = tuple([u,v])
edges_list.append(tup)
#alter the tuple(u,v) to tuple(v,u) if the order is inconsistent with the original graph's order
d1 = {}
    for key, val in d.items():
if not key in edges_list:
tup = tuple([key[1], key[0]])
d1.update({tup:val})
else:
d1.update({key:val})
return d1
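# Hedged usage sketch (added for illustration; the node ids, lengths and OD volumes
# are invented): every OD pair's flow is loaded onto its single shortest path.
def _example_aon_assignment():
    G = nx.Graph()
    G.add_edge(1, 2, length=1.0)
    G.add_edge(2, 3, length=1.0)
    G.add_edge(1, 3, length=5.0)
    od = pd.DataFrame(0.0, index=[1, 2, 3], columns=[1, 2, 3])
    od.loc[3, 1] = 100.0  # read as od[source][target]: 100 trips from node 1 to node 3
    od.loc[1, 3] = 100.0  # and 100 trips back from node 3 to node 1
    return aon_assignment(G, sources=[1, 3], targets=[1, 3], weight='length', od=od)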
def probit_assignment(G, sources, targets, weight, od, N=5, sd=10, penalty=0):
'''
    Function to do stochastic probit assignment on a transport network. The weight of each link
    is sampled from a normal distribution with the original link weight as the mean.
Parameters
------------
G: Graph
Transport network Graph Networkx object that will be analyzed
sources: list
List of nodes (integer) that will be used as sources. The integer should correspond to
node id in G Graph
targets: list
List of nodes (integer) that will be used as targets. The integer should correspond to
node id in G Graph
weight: str
String which corresponds to attribute of G Graph's edges that will be used as penalty for each
edge. In most cases this is defined as 'length' of the edge.
od: DataFrame
OD matrix dataframe
N: int
Number of probit iterations that want to be performed
sd: int
Percentage of the link's weight that will be used as standard deviation of the normal distribution (e.g.
if 10 is inputted, then the standard deviation is 10% of the link's weight). If you don't want to sample
over the normal distribution, set sd to 0.
penalty: double
        Penalty given to links that have already been part of the shortest-path set. If used, the value should be
        higher than 1. The intention is to force the algorithm to find distinct shortest paths across probit
        iterations by increasing the weight of links that appeared in shortest paths of previous iterations.
Returns
------------
d: dict
Dictionary with edge tuple as keys (e.g. (2,3) ) and flow value as values
'''
#create empty dict
d={}
#create copy of original network to avoid changing the attributes of the original network
G1 = G.copy()
#iterate N times
#in each iteration, sample the link's weight by using normal distribution
for i in np.arange(N):
length_dict = {}
for u,v,data in G1.edges(data=True):
tup = tuple([u,v])
if sd > 0:
length_mean = data[weight]
stdev = sd/100
length_sd = length_mean * stdev
length = np.random.normal(length_mean, length_sd)
if length < 0:
length = 0
else:
length = data[weight]
length_dict.update({tup:length})
#create a copy of G1 since we want to work the penalty on G1 later
G2 = G1.copy()
#set the attribute of G2, we'll work the assignment based on G2's weight information
nx.set_edge_attributes(G2, weight, length_dict)
#iterate over all sources
penalty_list = []
for i in range(len(sources)):
source = sources[i]
#iterate over all edges
for j in range(len(targets)):
target = targets[j]
#it is assumed that there is no self-loop on the node
#e.g. there is no flow from node A to node A
if source != target :
#determine shortest path between the OD pair
sp_dijk_all = nx.dijkstra_path(G2, source=source, target=target, weight=weight)
#update the betweenness value of all edges in the shortest path
flow = od[source][target]
#divide the flow over the number of iteration
flow = flow/N
for j in range(len(sp_dijk_all)-1):
lst = [sp_dijk_all[j],sp_dijk_all[j+1]]
lst = [min(lst), max(lst)]
tup = tuple(lst)
if tup in d.keys():
d[tup]+=1*flow
else:
d.update({tup:1*flow})
#if we want to work with penalty, record the shortest paths
if penalty > 0:
penalty_list.append(tup)
tup = tuple([tup[1],tup[0]])
penalty_list.append(tup)
#if work with penalty, update the weight of the links which belong to the shortest paths
if penalty > 0:
penalty_dict = {}
for u,v,data in G1.edges(data=True):
if tuple([u,v]) in penalty_list:
length = data[weight] * penalty
else:
length = data[weight]
penalty_dict.update({tuple([u,v]):length})
nx.set_edge_attributes(G1, weight, penalty_dict)
#assign 0 to all edges which don't belong to any shortest path
#at the same time, record all the correct order of edges name
edges_list = []
for u,v in G.edges():
elst = [u,v]
elst = [min(elst), max(elst)]
etup = tuple(elst)
if not etup in d.keys():
d.update({etup:0})
tup = tuple([u,v])
edges_list.append(tup)
#alter the tuple(u,v) to tuple(v,u) if the order is inconsistent with the original graph's order
d1 = {}
    for key, val in d.items():
if not key in edges_list:
tup = tuple([key[1], key[0]])
d1.update({tup:val})
else:
d1.update({key:val})
return d1
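# Hedged usage sketch (added for illustration; the parameter values are invented):
# five probit draws with link weights perturbed by a 10% standard deviation and a
# mild penalty that discourages re-using links from earlier iterations' shortest paths.
def _example_probit_assignment(G, od, sources, targets):
    return probit_assignment(G, sources, targets, weight='length', od=od,
                             N=5, sd=10, penalty=1.2)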
def edge_betweenness_centrality(flow, od):
'''
    Function to compute normalized edge betweenness centrality from an assigned flow dictionary.
    Each edge's assigned flow is divided by the total flow of the OD matrix.
Parameters
------------
flow: dict
Flow dictionary obtained from assignment function (e.g. from aon_assignment or probit_assignment)
od: DataFrame
OD matrix dataframe
Returns
------------
d: dict
Dictionary with edge tuple as keys (e.g. (2,3) ) and betweenness value as values
'''
#record the total flow in the network
totalval = (sum(od.sum()))
#copy the flow to avoid changing the original flow dictionary
flow2 = flow.copy()
#normalize the flow
for key, val in flow2.items():
flow2[key] = val / totalval
return flow2
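# Hedged usage sketch (added for illustration): turns an assigned flow dictionary
# into normalized betweenness values by dividing each edge flow by the total OD demand.
def _example_edge_betweenness(G, od, sources, targets):
    flow = aon_assignment(G, sources, targets, weight='length', od=od)
    return edge_betweenness_centrality(flow, od)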
def edge_betweenness_subset_od(G, sources, targets, weight, od):
'''
Old function before betweenness centrality and flow assignment were separated.
Calculating edge betweenness centrality between only subset of nodes in the network (e.g. between districts)
Parameters
------------
G: Graph
Transport network Graph Networkx object that will be analyzed
sources: list
List of nodes (integer) that will be used as sources. The integer should correspond to
node id in G Graph
targets: list
List of nodes (integer) that will be used as targets. The integer should correspond to
node id in G Graph
weight: str
String which corresponds to attribute of G Graph's edges that will be used as penalty for each
edge. In most cases this is defined as 'length' of the edge.
od: DataFrame
OD matrix dataframe
Returns
------------
d: dict
Dictionary with edge tuple as keys (e.g. (2,3) ) and betweenness value as values
'''
#create empty dict
d={}
#iterate over all sources
for i in range(len(sources)):
source = sources[i]
#iterate over all edges
for j in range(len(targets)):
target = targets[j]
#it is assumed that there is no self-loop on the node
#e.g. there is no flow from node A to node A
if source != target :
#determine shortest path between the OD pair
sp_dijk_all = nx.dijkstra_path(G, source=source, target=target, weight=weight)
#update the betweenness value of all edges in the shortest path
flow = od[source][target]
for j in range(len(sp_dijk_all)-1):
lst = [sp_dijk_all[j],sp_dijk_all[j+1]]
lst = [min(lst), max(lst)]
tup = tuple(lst)
if tup in d.keys():
d[tup]+=1*flow
else:
d.update({tup:1*flow})
#normalize the betweenness value
totalval = (sum(od.sum()))
for key, val in d.items():
d[key] = val / totalval
#assign 0 to all edges which don't belong to any shortest path
for u,v in G.edges():
elst = [u,v]
elst = [min(elst), max(elst)]
etup = tuple(elst)
if not etup in d.keys():
d.update({etup:0})
return d
def betweenness_to_df(gdf,betweenness,betweenness_string):
'''
Append betweenness centrality result to the transport network's GeoDataFrame.
For visualization purpose later.
Parameters
------------
gdf: GeoDataFrame
GeoDataFrame (Linestring) of the original transport network
betweenness: dict
Dictionary with edge tuple as keys (e.g. (2,3) ) and betweenness value as values
betweenness_string: str
String of betweenness dictionary's object name
Returns
------------
gdf_final: GeoDataFrame
Updated gdf with additional column of betweenness centrality
betweenness_df: DataFrame
Betweenness dictionary transformed into dataframe
'''
betweenness_df = pd.DataFrame(betweenness.items(), columns=['FromTo_tuple', betweenness_string])
FromTo_tuple = betweenness_df['FromTo_tuple'].tolist()
FromTo_tolist = []
for i in FromTo_tuple:
odlist = list(i)
minval = min(odlist)
maxval = max(odlist)
val = str(minval) + str(maxval)
FromTo_tolist.append(val)
betweenness_df['FromTo'] = FromTo_tolist
c = []
for i in range(len(gdf)):
minval = min([gdf['TNODE_'][i],gdf['FNODE_'][i]])
maxval = max([gdf['TNODE_'][i],gdf['FNODE_'][i]])
val = str(minval) + str(maxval)
c.append(val)
gdf['FromTo'] = c
gdf_final = | pd.merge(gdf,betweenness_df,on='FromTo',how='outer') | pandas.merge |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
#plt.hist(data['Rating'])
#plt.show()
data = data[data.Rating < 6]
plt.hist(data['Rating'])
plt.show()
#Code ends here
# --------------
# code starts here
total_null = data.isna().sum()
percent_null = (total_null/data.isnull().count())
missing_data = pd.concat([total_null, percent_null], axis=1, keys=['Total','Percent'])
print(missing_data)
data.dropna(inplace=True)
total_null_1 = data.isna().sum()
percent_null_1 = (total_null_1/data.isnull().count())
missing_data_1 = | pd.concat([total_null_1, percent_null_1], axis=1, keys=['Total','Percent']) | pandas.concat |
'''
Using dataset from smart intersection, time table with TOD labels is estimated by K-Means method
* Unit: 30 minute
* Single intersection
* Go-direction traffic includes right-turn traffic
* Input dataset:
- ORT_CCTV_5MIN_LOG
- ORT_CCTV_MST
* Output:
- TOD table
- Traffic analysis according to each TOD period (Traffic: veh/30mins)
* Example code: python TOD.py --crsrd-id 1860001900 --input-dir ./data --output-dir ./result --max-tod 4
* Range of SC Interpretation
0.71-1.0 A strong structure has been found
0.51-0.70 A reasonable structure has been found
0.26-0.50 The structure is weak and could be artificial
< 0.25 No substantial structure has been found
'''
import argparse
import os
import statistics
import pandas as pd
# from yellowbrick.cluster import KElbowVisualizer
from dplython import (DplyFrame, X, select, sift, group_by, summarize)
from sklearn import preprocessing
from sklearn.cluster import KMeans
# import numpy as np
# from pandas import DataFrame
# import csv
parser = argparse.ArgumentParser()
parser.add_argument('--crsrd-id', required = True, help = 'ID for the crossroad of interest', type=str)
parser.add_argument('--input-dir', required = True, help = 'directory including inputs', type=str)
parser.add_argument('--output-dir', required = True, help = 'directory to save outputs', type=str)
parser.add_argument('--max-tod', required = False, help = 'maximum number of TOD groups', type=int, default = 4)
# parser.add_argument('--vis', required = False, help = 'visualize result(1) or not(0, default)', type=int, default = 0)
args = parser.parse_args()
pd.set_option('mode.use_inf_as_na', True)
def load_data(input_dir, crsrd_id):
cctv_log = pd.read_csv(input_dir + "/ORT_CCTV_5MIN_LOG.csv")
cctv_mst = pd.read_csv(input_dir + "/ORT_CCTV_MST.csv")
cctv_log['DATE'] = pd.DataFrame(pd.DatetimeIndex(cctv_log['REG_DT']).date)
cctv_log['HOUR'] = pd.DataFrame(pd.DatetimeIndex(cctv_log['REG_DT']).hour)
cctv_log['MINUTE'] = (pd.DataFrame( | pd.DatetimeIndex(cctv_log['REG_DT']) | pandas.DatetimeIndex |
#!/usr/bin/env python
# coding: utf-8
def haversine_vectorize(lon1, lat1, lon2, lat2):
import numpy as np
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
newlon = lon2 - lon1
newlat = lat2 - lat1
haver_formula = np.sin(newlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(newlon/2.0)**2
dist = 2 * np.arcsin(np.sqrt(haver_formula ))
km = 6367 * dist #6367 for distance in KM for miles use 3958
return round(km,2)
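# Hedged usage sketch (added for illustration; the coordinates are invented):
# works on scalars as well as pandas Series and returns the great-circle distance in km.
def _example_haversine():
    return haversine_vectorize(32.58, -25.97, 34.84, -19.84)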
def CurrentHospitals(current_hospitals, network, nodes):
import pandas as pd
current_hospitals.columns = ['Hosp_ID','Longitude','Latitude','name']
current_hospitals_ID = current_hospitals['Hosp_ID'].unique()
current_hospitals['nearest_node'] = network.get_node_ids(current_hospitals['Longitude'], current_hospitals['Latitude'], mapping_distance=None)
current_hospitals = pd.merge(current_hospitals,nodes,right_on='nodeID',left_on='nearest_node')
current_hospitals['hosp_dist_road_estrada'] = haversine_vectorize(current_hospitals['Longitude'],current_hospitals['Latitude'],current_hospitals['lon'],current_hospitals['lat'])
return (current_hospitals_ID, current_hospitals)
def NewHospitalsCSV(current_hospitals, new_hospitals, network, nodes ):
from shapely import geometry, ops
import numpy as np
import pandas as pd
new_hospitals=new_hospitals[['xcoord','ycoord']]
new_hospitals.columns = ['Longitude','Latitude']
new_hospitals['Cluster_ID'] = np.arange(len(new_hospitals)) + len(current_hospitals)
new_hospitals_ID = new_hospitals['Cluster_ID'].unique()
new_hospitals['nearest_node'] = network.get_node_ids(new_hospitals['Longitude'], new_hospitals['Latitude'], mapping_distance=None)
new_hospitals = | pd.merge(new_hospitals,nodes,right_on='nodeID',left_on='nearest_node') | pandas.merge |
#Miscellaneous Functions for Fetch! Dog Adoption, not utilized
from scipy.spatial import distance
import pandas as pd
from numpy import inner
from numpy.linalg import norm
def cosine_similarity(user_predict, adoptable_dogs, images):
'''
    Calculates the cosine distance (scipy's distance.cosine, i.e. 1 - cosine similarity) between the user
    submitted picture and the adoptable dogs collection; a lower SimScore therefore means a closer match.
    INPUT: user_predict = features from user submitted image,
           adoptable_dogs = list of features in the collection,
           images = list of image filepaths associated with adoptable_dogs features
    OUTPUT: Pandas dataframe with image filepath and similarity score (cosine distance)
'''
sim_score = []
for idx in range(0, len(adoptable_dogs)):
sim_score.append(distance.cosine(user_predict.flatten(), adoptable_dogs[idx].flatten()))
print('Maximum SimScore: '+str(max(sim_score)))
return pd.DataFrame({'imgFile':images, 'SimScore':sim_score})
def bray_curtis_dist(user_predict, adoptable_dogs, images):
'''
Calculating Bray-Curtis distance between two 1D arrays and return similarity score
'''
sim_score = []
for idx in range(0, len(adoptable_dogs)):
sim_score.append(distance.braycurtis(user_predict.flatten(), adoptable_dogs[idx].flatten()))
print('Maximum SimScore: '+str(max(sim_score)))
return pd.DataFrame({'imgFile':images, 'SimScore':sim_score})
def canberra_dist(user_predict, adoptable_dogs, images):
'''
Calculating Canberra distance between two 1D arrays and return similiarty score
'''
sim_score = []
for idx in range(0, len(adoptable_dogs)):
sim_score.append(distance.canberra(user_predict.flatten(), adoptable_dogs[idx].flatten()))
print('Maximum SimScore: '+str(max(sim_score)))
return | pd.DataFrame({'imgFile':images, 'SimScore':sim_score}) | pandas.DataFrame |
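# Hedged usage sketch (added for illustration; the feature vectors and file names are
# invented). scipy's distance functions return distances, so a lower SimScore means a
# closer match regardless of which of the three metrics is used.
def _example_similarity_scores():
    import numpy as np
    user = np.random.rand(1, 128)
    dogs = [np.random.rand(1, 128) for _ in range(3)]
    files = ['dog_a.jpg', 'dog_b.jpg', 'dog_c.jpg']
    return cosine_similarity(user, dogs, files)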
"""
hhpy.ipython.py
~~~~~~~~~~~~~~~
Contains convenience wrappers for ipython
"""
# ---- imports
# --- standard imports
import pandas as pd
# --- third party imports
from IPython.display import display, HTML
# --- local imports
from hhpy.main import export, assert_list, list_exclude
# ---- functions
# --- export
@export
def wide_notebook(width: int = 90):
"""
makes the jupyter notebook wider by appending html code to change the width,
based on https://stackoverflow.com/questions/21971449/how-do-i-increase-the-cell-width-of-the-jupyter-
ipython-notebook-in-my-browser
    :param width: width in percent, default 90 [optional]
:return: None
"""
# noinspection PyTypeChecker
    display(HTML('<style>.container {{ width:{}% !important; }}</style>'.format(width)))
@export
def hide_code():
"""
hides the code and introduces a toggle button
based on https://stackoverflow.com/questions/27934885/how-to-hide-code-from-cells-in-ipython-notebook-visualized
-with-nbviewer
:return: None
"""
# noinspection PyTypeChecker
display(HTML('''
<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.
'''))
@export
def display_full(*args, rows=None, cols=None, **kwargs):
"""
wrapper to display a pandas DataFrame with all rows and columns
:param rows: number of rows to display, defaults to all
:param cols: number of columns to display, defaults to all
:param args: passed to display
:param kwargs: passed to display
:return: None
"""
with pd.option_context('display.max_rows', rows, 'display.max_columns', cols):
display(*args, **kwargs)
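# Minimal usage sketch (assumes a Jupyter/IPython session is running): renders a
# hypothetical dataframe with every row visible via display_full above.
def _demo_display_full():
    df = pd.DataFrame({'value': range(200)})  # illustrative data only
    display_full(df, rows=None, cols=None)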
@export
def pd_display(*args, number_format='{:,.2f}', full=True, **kwargs):
"""
wrapper to display a pandas DataFrame with a specified number format
:param args: passed to display
:param number_format: the number format to apply
:param full: whether to use :func:`~display_full` (True) or standard display (False)
:param kwargs: passed to display
:return: None
"""
pd.set_option('display.float_format', number_format.format)
if full:
display_full(*args, **kwargs)
else:
display(*args, **kwargs)
| pd.reset_option('display.float_format') | pandas.reset_option |
import tarfile
import anndata
import os
import pandas as pd
import scipy.sparse
import h5py
def load(data_dir, sample_fn, **kwargs):
fn = os.path.join(data_dir, 'GSE122960_RAW.tar')
with tarfile.open(fn) as tar:
f = h5py.File(tar.extractfile(f'{sample_fn}_filtered_gene_bc_matrices_h5.h5'), 'r')['GRCh38']
x = scipy.sparse.csc_matrix((f['data'], f['indices'], f['indptr']), shape=f['shape']).T
var = | pd.DataFrame({'feature_id': f['genes'], 'feature_symbol': f['gene_names']}) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import mpl_finance as mpf
import pandas as pd
def plot_Self(file1, file2):
# data1 = pd.read_csv(file1, header=None).to_numpy()
# data2 = pd.read_csv(file2, header=None).to_numpy()
data1 = np.loadtxt(file1)
data2 = np.loadtxt(file2)
label1 = range(data1.shape[0])
label2 = range(data2.shape[0])
plt.style.use('seaborn-darkgrid')
# plt.plot(label1, data1, c='red')
# plt.plot(label2, data2, c='blue')
l1 = plt.plot(label1, data1)
l2 = plt.plot(label2, data2)
plt.legend((l1[0], l2[0]), ('JoinQuant', 'My_Data'))
plt.xlabel('Time')
plt.ylabel('Reward')
plt.show()
def plot_all(data_list, title, strategy):
colors = ['red', 'blue', 'green', 'orange', 'black', 'pink', 'yellow']
legend = [
['Real', 'interpolation_worst', 'interpolation_best', 'resample_worst', 'resample_best'],
['Real', 'interpolation_best', 'interpolation_worst', 'resample_best', 'resample_worst']
]
plt.style.use('seaborn-darkgrid')
for i in range(len(data_list)):
data = np.array(data_list[i])
x = range(data.shape[0])
plt.plot(x, data, c=colors[i], label=legend[strategy][i])
plt.legend()
plt.title(title)
plt.xlabel('Time')
plt.ylabel('Reward')
plt.show()
def pick(daily_data, sum_data, all_data):
# (10, 2, 500, 4)
HMM_all_field_daily_data = daily_data
# (10, 2, 11)
HMM_all_field_sum_data = sum_data
# (10, 300, 500, 13)
HMM_all_stock = all_data
strategy_num = HMM_all_field_sum_data.shape[1]
strategy_id = []
max_universe_total_reward_index = []
min_universe_total_reward_index = []
max_daily_data = []
min_daily_data = []
for i in range(strategy_num):
strategy_id.append(i)
# print(HMM_all_field_sum_data[:, i, 0])
max_index = np.argmax(HMM_all_field_sum_data[:, i, 0])
max_universe_total_reward_index.append(max_index)
min_index = np.argmin(HMM_all_field_sum_data[:, i, 0])
min_universe_total_reward_index.append(min_index)
# test = HMM_all_field_daily_data[max_index][i].T
max_daily_data.append(HMM_all_field_daily_data[max_index][i].T)
min_daily_data.append(HMM_all_field_daily_data[min_index][i].T)
    # Return values: strategy ids, the best/worst run indices, and their daily data
return strategy_id, max_universe_total_reward_index, min_universe_total_reward_index, max_daily_data, min_daily_data
def get_return_reward(daily_list, init_money=1000000):
tmp = init_money
list = []
for i in range(daily_list.shape[0]):
tmp += daily_list[i]
list.append(tmp)
return np.array(list)
def recon_data(k_data):
# require [date, open, close, high, low, volume]
k_data[:, [1, 2]] = k_data[:, [2, 1]]
tmp = range(500)
data = np.c_[tmp, k_data]
pd_data = | pd.DataFrame(data) | pandas.DataFrame |
###########################################################
# Encode
###########################################################
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing,model_selection, ensemble
from sklearn.preprocessing import LabelEncoder
import scipy.stats as ss
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
def cat2MedianShiftEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
med_y = np.median(y)
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_medshftenc'] = datax['y_median']-med_y
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(0)
datatst = datatst.join(datax,on=[c], how='left').fillna(0)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_medshftenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
return train_df, test_df
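# Illustrative sketch only: how the bagged out-of-fold target encoders in this
# module are typically invoked. The column name, target and the small nbag/nfold
# values are assumptions chosen so the example runs quickly.
def _demo_median_shift_encode():
    rng = np.random.RandomState(0)
    train = pd.DataFrame({'cat_a': rng.choice(list('abc'), 200)})
    test = pd.DataFrame({'cat_a': rng.choice(list('abc'), 50)})
    y = rng.rand(200)
    return cat2MedianShiftEncode(train, test, y, nbag=2, nfold=4, minCount=1)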
def cat2MeanShiftEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
mean_y = np.mean(y)
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_meanshftenc'] = datax['y_mean'] - mean_y
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(0)
datatst = datatst.join(datax,on=[c], how='left').fillna(0)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_meanshftenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
return train_df, test_df
def cat2MeanEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = np.mean(y)
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_meanenc'] = datax['y_mean']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_meanenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def cat2MedianEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = np.mean(y)
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
                datax[c+'_medianenc'] = datax['y_median']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_medianenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def countEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = 999
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_countenc'] = datax['y_len']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= nbag
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_countenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def rankCountEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = 999
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_rankenc'] = datax['y_len']
datax[c+'_rankenc'] = ss.rankdata(datax[c+'_rankenc'].values)
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_rankenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = | pd.concat((enc_mat,train_df), axis=1) | pandas.concat |
import numpy as np
import glob
import pandas as pd
import os
import time
from tqdm.auto import tqdm
from .misc import dfMirror
########################################################################################################################
# it's best to use asciiToDfMulti() (which exploits this asciiToDf()) also for single file opening
def asciiToDf(
nameFormat,
asciiMap,
nLinesEv = 1,
descFrac = 1,
mirrorMap = (), # this is a tuple here, but a dictionary in asciiToDfMulti() (i.e. the "main" function)
bVerbose = False,
bProgress = False,
):
t0 = time.time() # chronometer start
names = sorted(glob.glob(nameFormat.replace("YYYYYY", "*"))) # list of all the filenames of the current run
df = pd.DataFrame()
descFrac = 1e-12 if descFrac <= 0 else (descFrac if descFrac <= 1 else 1)
for iName in tqdm((names)) if (bVerbose & bProgress) else names:
if os.stat(iName).st_size > 0:
if nLinesEv == 1:
dataTableTemp = np.loadtxt(iName, unpack=False, ndmin=2)
else:
fileToString0 = open(iName,'r').read()
fileToStringSplitted0 = fileToString0.splitlines()
fileToString = ""
for i, iLine in enumerate(fileToStringSplitted0):
if (i%nLinesEv==nLinesEv-1):
fileToString += iLine + "\n"
else:
fileToString += iLine + " "
fileToStringSplitted = fileToString.splitlines()
dataTableTemp = np.loadtxt(fileToStringSplitted)
dfTemp = pd.DataFrame(dataTableTemp, columns=asciiMap)
df = df.append(dfTemp[dfTemp.index % int(1 / descFrac) == 0], ignore_index=True, sort=False)
df = dfMirror(df, mirrorMap)
t1 = time.time() # chronometer stop
dt = t1 - t0
return df, dt
########################################################################################################################
def asciiToDfMulti(
nameFormat,
fileIndex,
asciiMap,
fileIndexName = "iIndex",
nLinesEv = 1,
descFrac = {},
mirrorMap = {},
bVerbose = False,
bProgress = False,
):
t0 = time.time() # chronometer start
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2020, <NAME>.
# Distributed under the MIT License. See LICENSE for more info.
"""
Scree plot
==========
This example will show the eigenvalues of principal components
from a
`principal component analysis
<https://en.wikipedia.org/wiki/Principal_component_analysis>`_.
"""
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from psynlig import pca_scree
plt.style.use('seaborn')
plt.rcParams.update({'font.size': 16})
data_set = load_wine()
data = | pd.DataFrame(data_set['data'], columns=data_set['feature_names']) | pandas.DataFrame |
"""
<NAME>
Test 2
Exploratory data analysis for the admissions dataset
"""
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
import numpy as np
from mlxtend.plotting import heatmap
from sklearn.preprocessing import OneHotEncoder
import sys
#read the data into a pandas dataframe
df = | pd.read_csv('Admission_Predict.csv') | pandas.read_csv |
import json
import os
import re
from typing import List
import numpy as np
import pandas as pd
from common import camel_case_to_snake_case, load_institutions, isnumber
def convert_initial_to_row(data: dict, rank: int, document_specific=False) -> List:
row = [rank, data["name"], data["country"]]
if document_specific:
row = []
for metric in data["metrics"]:
if "valueByYear" in metric:
for year in metric["valueByYear"]:
row.append(metric["valueByYear"][year])
if "values" in metric:
for value in metric["values"]:
for year in value["valueByYear"]:
row.append(value["valueByYear"][year])
return row
def get_header_by_year(metric_name: str, metric: dict, percentage=False) -> List[str]:
if percentage:
return [f"{metric_name}_percentage_{year}" for year in metric['percentageByYear']]
else:
return [f"{metric_name}_{year}" for year in metric["valueByYear"]]
def get_headers(data: dict, document_type: str = None) -> List[str]:
headers = ["rank", "name", "country"]
if document_type is not None:
headers = []
for metric in data["metrics"]:
metric_name = camel_case_to_snake_case(metric["metricType"])
if document_type is not None:
metric_name = f"{metric_name}_{document_type}"
if "valueByYear" in metric:
headers.extend(get_header_by_year(metric_name, metric))
# if "percentageByYear" in metric:
# headers.extend(get_header_by_year(metric_name, metric, percentage=True))
if "values" in metric:
for value in metric["values"]:
if "collabType" in value:
if "corporate" in metric_name:
if "No" in value["collabType"]:
composed_name = f"no_{metric_name}"
else:
composed_name = f"{metric_name}"
else:
collab_type = value["collabType"].split(" ")[0].lower()
composed_name = f"{metric_name}_{collab_type}"
else:
threshold = value['threshold']
if document_type is None:
parts = metric_name.rsplit('_', 2)
composed_name = f"{parts[0]}_{threshold}percent_{parts[1]}"
else:
parts = metric_name.rsplit('_', 3)
composed_name = f"{parts[0]}_{threshold}percent_{parts[1]}_{parts[3]}"
headers.extend(get_header_by_year(composed_name, value))
# headers.extend(get_header_by_year(composed_name, value, percentage=True))
headers = [camel_case_to_snake_case(x).replace("__", "_") for x in headers]
return headers
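# For reference, the SciVal-style metric entries consumed above look roughly
# like the following (keys match the code; values are hypothetical):
#   {"metricType": "ScholarlyOutput", "valueByYear": {"2016": 10, "2017": 12}}
#   {"metricType": "Collaboration",
#    "values": [{"collabType": "International collaboration",
#                "valueByYear": {"2016": 3}}]}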
def get_author_info() -> pd.DataFrame:
headers = ["top_500_authors_output_avg", "top_500_authors_output_annual_avg", "top_500_authors_hindex_avg",
"top_500_authors_citacions_avg", "top_500_authors_citacions_annual_avg",
"top_500_authors_citacions_per_publication_avg", "top_500_authors_citacions_per_publication_annual_avg"]
pattern = re.compile(r"\d{1,3}_.*")
result = pd.DataFrame(columns=headers)
dirs = [x for x in os.listdir("Data/Scopus") if pattern.match(x)]
for directory in dirs:
file_path = os.path.join("Data/Scopus", directory, "List_of_authors.csv")
a = pd.read_csv(file_path, header=9)
a = a.iloc[:-1, [1, 3, 4, 5, 6]]
a = a.mean()
serie = pd.Series(index=headers)
serie.name = int(directory.split("_")[0])
serie["top_500_authors_output_avg"] = a[0]
serie["top_500_authors_output_annual_avg"] = a[0] / 5
serie["top_500_authors_hindex_avg"] = a[4]
serie["top_500_authors_citacions_avg"] = a[1]
serie["top_500_authors_citacions_annual_avg"] = a[1] / 5
serie["top_500_authors_citacions_per_publication_avg"] = a[2]
serie["top_500_authors_citacions_per_publication_annual_avg"] = a[0] / 5
result = result.append(serie)
left = [x for x in range(0, 200) if x not in result.index]
for i in left:
result = result.append(pd.Series(index=headers, name=i))
return result.sort_index()
def get_funding_info() -> pd.DataFrame:
headers = ["grants_value", "grants_value_per_year", "grants_value_growth", "number_of_grants",
"number_of_grants_per_year", "number_of_grants_growth", "number_of_sponsors"]
pattern = re.compile(r"\d{1,3}_.*")
result = pd.DataFrame(columns=headers)
dirs = [x for x in os.listdir("Data/Scopus") if pattern.match(x)]
for directory in dirs:
index = int(directory.split("_")[0])
serie = pd.Series(index=headers)
serie.name = index
try:
file_path = os.path.join("Data/Scopus", directory, "Awarded_Grants_by_Funding_Body.csv")
a = pd.read_csv(file_path, header=6)
a = a.iloc[:-1, [2, 4, 5, 6]]
a = a[a.applymap(isnumber)]
a.iloc[:, 1] = pd.to_numeric(a.iloc[:, 1])
a.iloc[:, 3] = | pd.to_numeric(a.iloc[:, 3]) | pandas.to_numeric |
import gensim
import numpy as np
import pandas as pd
import psycopg2
import re
import os
import time
import warnings
warnings.filterwarnings('ignore')
my_time = time.time() # global time setter for timer_func() debugging purposes
def fill_id(id):
"""Adds leading zeroes back if necessary. This makes the id match the database."""
if len(str(id)) < 7:
length = len(str(id))
id = "0"*(7 - length) + str(id)
return str(id)
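# For example (illustrative): fill_id(12345) returns '0012345', while a value
# that is already seven characters long, e.g. 1234567, comes back unchanged.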
def df_to_id_list(df, id_book):
"""Converts dataframe of movies to a list of the IDs for those movies.
Every title in the input dataframe is checked against the local file, which
includes all the titles and IDs in our database. For anything without a match,
replace the non-alphanumeric characters with wildcards, and query the database
for matches.
"""
df['Year'] = df['Year'].astype(int).astype(str)
matched = pd.merge(df, id_book,
left_on=['Name', 'Year'], right_on=['primaryTitle', 'startYear'],
how='inner')
ids = matched['tconst'].astype(str).tolist()
final_ratings = []
names = df.Name.tolist()
years = [int(year) for year in df.Year.tolist()]
if 'Rating' in df.columns:
stars = [int(rating) for rating in df.Rating.tolist()]
info = list(zip(names, years, stars))
final_ratings = matched['Rating'].astype(int).tolist()
else:
info = list(zip(names, years, list(range(len(years)))))
missed = [x for x in info if x[0] not in matched['primaryTitle'].tolist()]
for i, j, k in missed:
        i = re.sub(r'[^\s0-9a-zA-Z]+', '%', i)
try:
# TODO: fix this cursor so it actually references something.
cursor_dog.execute(f"""
SELECT movie_id, original_title, primary_title
FROM movies
WHERE primary_title ILIKE '{i}' AND start_year = {j}
OR original_title ILIKE '{i}' AND start_year = {j}
ORDER BY runtime_minutes DESC
LIMIT 1""")
id = cursor_dog.fetchone()[0]
ids.append(id)
final_ratings.append(k)
except Exception as e:
continue
final_ratings = [x*2 for x in final_ratings]
ratings_dict = dict(zip(ids, final_ratings))
return tuple([[fill_id(id) for id in ids], ratings_dict])
def prep_data(ratings_df, watched_df=None, watchlist_df=None,
good_threshold=4, bad_threshold=3):
"""Converts dataframes of exported Letterboxd data to lists of movie_ids.
Parameters
----------
ratings_df : pd dataframe
Letterboxd ratings.
watched_df : pd dataframe
Letterboxd watch history.
watchlist_df : pd dataframe
Letterboxd list of movies the user wants to watch.
Used in val_list for scoring the model's performance.
good_threshold : int
Minimum star rating (10pt scale) for a movie to be considered "enjoyed" by the user.
bad_threshold : int
Maximum star rating (10pt scale) for a movie to be considered "disliked" by the user.
Returns
-------
tuple of lists of ids.
(good_list, bad_list, hist_list, val_list)
"""
id_book = pd.read_csv('title_basics_small.csv')
try:
# try to read Letterboxd user data
# drop rows with nulls in the columns we use
ratings_df = ratings_df.dropna(axis=0, subset=['Rating', 'Name', 'Year'])
# split according to user rating
good_df = ratings_df[ratings_df['Rating'] >= good_threshold]
bad_df = ratings_df[ratings_df['Rating'] <= bad_threshold]
neutral_df = ratings_df[(ratings_df['Rating'] > bad_threshold) & (ratings_df['Rating'] < good_threshold)]
# convert dataframes to lists
good_list, good_dict = df_to_id_list(good_df, id_book)
bad_list, bad_dict = df_to_id_list(bad_df, id_book)
neutral_list, neutral_dict = df_to_id_list(neutral_df, id_book)
except KeyError:
# Try to read IMDb user data
# strip ids of "tt" prefix
ratings_df['movie_id'] = ratings_df['Const'].apply(lambda x: str(x).lstrip("tt"))
# drop rows with nulls in the columns we use
ratings_df = ratings_df.dropna(axis=0, subset=['Your Rating', 'Year'])
# split according to user rating
good_df = ratings_df[ratings_df['Your Rating'] >= good_threshold*2]
bad_df = ratings_df[ratings_df['Your Rating'] <= bad_threshold*2]
neutral_df = ratings_df[(ratings_df['Your Rating'] > bad_threshold*2) & (ratings_df['Your Rating'] < good_threshold*2)]
# convert dataframes to lists
good_list = good_df['movie_id'].to_list()
bad_list = bad_df['movie_id'].to_list()
neutral_list = neutral_df['movie_id'].to_list()
# make ratings dictionaries
good_dict = dict(zip(good_list, good_df['Your Rating'].tolist()))
bad_dict = dict(zip(bad_list, bad_df['Your Rating'].tolist()))
neutral_dict = dict(zip(neutral_list, neutral_df['Your Rating'].tolist()))
except Exception as e:
# can't read the dataframe as Letterboxd or IMDb user data
print("This dataframe has columns:", ratings_df.columns)
raise Exception(e)
ratings_dict = dict(list(good_dict.items()) + list(bad_dict.items()) + list(neutral_dict.items()))
if (watched_df is not None) and (not watched_df.empty):
# Construct list of watched movies that aren't rated "good" or "bad"
# First, get a set of identified IDs.
rated_names = set(good_df.Name.tolist() + bad_df.Name.tolist() + neutral_list)
# drop nulls from watched dataframe
full_history = watched_df.dropna(axis=0, subset=['Name', 'Year'])
# get list of watched movies that haven't been rated
hist_list = df_to_id_list(full_history[~full_history['Name'].isin(rated_names)], id_book)[0]
# add back list of "neutral" movies (whose IDs we already found before)
hist_list = hist_list + neutral_list
else: hist_list = neutral_list
if (watchlist_df is not None) and (not watchlist_df.empty):
try:
watchlist_df = watchlist_df.dropna(axis=0, subset=['Name', 'Year'])
val_list = df_to_id_list(watchlist_df, id_book)[0]
except KeyError:
watchlist_df = watchlist_df.dropna(axis=0, subset=['Const', 'Year'])
watchlist_df['movie_id'] = watchlist_df['Const'].str.lstrip("tt")
val_list = watchlist_df['movie_id'].tolist()
else: val_list = []
return (good_list, bad_list, hist_list, val_list, ratings_dict)
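# Illustrative input shape (not executed here): prep_data expects a Letterboxd
# ratings export with 'Name', 'Year' and 'Rating' columns, e.g.
#   ratings_df = pd.DataFrame({'Name': ['Alien'], 'Year': [1979], 'Rating': [4.5]})
#   good, bad, hist, val, ratings = prep_data(ratings_df)
# Resolving titles to IDs requires title_basics_small.csv and the database
# connection, so this sketch is indicative only.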
class Recommender(object):
def __init__(self, model_path):
"""Initialize model with name of .model file"""
self.model_path = model_path
self.model = None
self.cursor_dog = None
self.id_book = | pd.read_csv('title_basics_small.csv') | pandas.read_csv |
from suzieq.gui.guiutils import display_help_icon
from suzieq.gui.guiutils import (gui_get_df, get_base_url, get_session_id,
SuzieqMainPages)
from suzieq.sqobjects.path import PathObj
from copy import copy
from urllib.parse import quote
from typing import Tuple
import graphviz as graphviz
import pandas as pd
import streamlit as st
from dataclasses import dataclass, asdict, field
def get_title():
return 'Path'
def make_fields_failed_df():
'''return a dictionary as this is the only way dataclassses work'''
return [
{'name': 'device', 'df': pd.DataFrame(), 'query': 'status == "dead"'},
{'name': 'interfaces', 'df': | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import joblib
import pickle
'''
Define global variables
'''
# df_hx = pd.read_csv('./data/hx_js.csv', header=0)
# df_xfx = pd.read_csv('./data/xfx_js.csv', header=0)
def get_time_tuple(df):
res = []
get_operate_date = lambda x:(pd.to_datetime(x) + pd.Timedelta(hours=4)).strftime('%Y-%m-%d')
get_operator_time = lambda x:pd.to_datetime(x)
df['date'] = df['指令时间'].apply(get_operate_date)
df['time'] = df['指令时间'].apply(get_operator_time)
for date, item in df.groupby('date'):
        ## look for the water-on ('进水') command
cond_start = item['指令'] == '进水'
if sum(cond_start) == 1:
start_time = item[cond_start].iloc[0,-1]
elif sum(cond_start) == 0:
start_time = 0
            print(f'{date}: no water-on command')
else:
start_time = 0
            print(f'{date}: multiple water-on commands')
## 搜索关水指令
cond_end = item['指令'] == '关水'
if sum(cond_end) == 1:
end_time = item[cond_end].iloc[0, -1]
elif sum(cond_end) == 0:
end_time = 0
            print(f'{date}: no water-off command')
else:
end_time = 0
            print(f'{date}: multiple water-off commands')
if start_time!= 0 and end_time != 0:
duration = (end_time-start_time).seconds/3600
res_record = [date, start_time, end_time, duration]
res.append(res_record)
df_res = pd.DataFrame(res, columns=['date','start_time','end_time','duration'])
return df_res
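# Illustrative sketch: get_time_tuple expects a command log using the Chinese
# column names referenced above ('指令时间' = command time, '指令' = command).
# The timestamps below are assumptions for demonstration only.
def _demo_get_time_tuple():
    df = pd.DataFrame({
        '指令时间': ['2021-01-01 22:00:00', '2021-01-02 06:00:00'],
        '指令': ['进水', '关水'],  # water-on / water-off commands
    })
    return get_time_tuple(df)  # one row: an 8-hour filling duration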
def get_std_time(x):
hour = int(x.strftime('%H'))
if hour > 21:
return pd.to_datetime(x.strftime('%H:%M:%S'))- | pd.Timedelta(days=1) | pandas.Timedelta |
import os
import yaml
import argparse
import numpy as np
import pandas as pd
from pycytominer import audit
from scripts.viz_utils import plot_replicate_correlation, plot_replicate_density
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="configuration yaml file for batch information")
parser.add_argument(
"--profile_dir",
help="directory storing profiles",
default="../0.generate-profiles/profiles",
)
parser.add_argument(
"--output_dir", help="directory where to save audit results", default="results"
)
parser.add_argument(
"--figure_dir", help="directory where to save audit figures", default="figures"
)
args = parser.parse_args()
config = args.config
profile_dir = args.profile_dir
output_dir = args.output_dir
figure_dir = args.figure_dir
np.random.seed(1234)
output_file_extensions = [".png"]
audit_config = {}
stream = open(config, "r")
for data in yaml.load_all(stream, Loader=yaml.FullLoader):
batch = data["batch"]
audit_level = data["auditlevel"]
plates = [str(x) for x in data["plates"]]
audit_config[batch] = {}
audit_config[batch]["plates"] = plates
audit_config[batch]["auditcols"] = data["auditcols"]
audit_config[batch]["process"] = data["process"]
audit_config[batch]["plate_files"] = {
x: os.path.join(profile_dir, batch, x, "{}_{}.csv.gz".format(x, audit_level))
for x in plates
}
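# Each YAML document is expected to carry the keys read above, for example
# (values here are hypothetical):
#   batch: "2019_06_25_Batch3"
#   auditlevel: "normalized"
#   plates: [218360, 218361]
#   auditcols: ["Metadata_Well", "Metadata_plate_map_name"]
#   process: true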
for batch in audit_config:
batch_dict = audit_config[batch]
process = batch_dict["process"]
if not process:
continue
audit_cols = batch_dict["auditcols"]
plate_files = batch_dict["plate_files"]
plates = batch_dict["plates"]
for plate in plates:
print("Now auditing... Batch: {}; Plate: {}".format(batch, plate))
audit_output_dir = os.path.join(output_dir, batch, plate)
os.makedirs(audit_output_dir, exist_ok=True)
figure_output_dir = os.path.join(figure_dir, batch, plate)
os.makedirs(figure_output_dir, exist_ok=True)
audit_output_file = os.path.join(audit_output_dir, "{}_audit.csv".format(plate))
df = pd.read_csv(plate_files[plate])
audit(
df,
audit_groups=audit_cols,
audit_resolution="full",
output_file=audit_output_file,
)
audit_df = | pd.read_csv(audit_output_file) | pandas.read_csv |
import requests
import bs4
import sqlite3
import pandas as pd
hr_db_filename = 'C:/Users/Jeff/Google Drive/research/Hampton Roads Data/Time Series/' \
'hampt_rd_data.sqlite'
def get_id(typ, data):
"""
gets either the siteid or variableid from the db
:param typ: String. Either "Site" or "Variable"
:param data: Dict. the site or variable data
:return: int. id of site or variable
"""
data_df = pd.DataFrame(data, index=[0])
code_name = '{}Code'.format(typ)
table_name = '{}s'.format(typ.lower())
id_name = '{}ID'.format(typ)
code = data[code_name]
check_by = [code_name]
append_non_duplicates(table_name, data_df, check_by)
table = get_db_table_as_df(table_name)
id_row = table[table[code_name] == code]
id_num = id_row[id_name].values[0]
return id_num
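# Example call (values are hypothetical): get_id('Variable', {'VariableCode': 'WTEMP'})
# returns the integer VariableID, inserting the variable first if it is not yet
# present in the variables table.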
def append_non_duplicates(table, df, check_col, site_id=None, var_id=None):
"""
adds values that are not already in the db to the db
:param table: String. name of table where the values should be added e.g. 'sites'
:param df: pandas df. a dataframe with the data to be potentially added to the db
:param check_col: List. the columns that will be used to check for duplicates in db e.g.
'VariableCode' and 'VariableType' if checking a variable
:return: pandas df. a dataframe with the non duplicated values
"""
con = sqlite3.connect(hr_db_filename)
if table =='datavalues' and site_id and var_id:
sql = "SELECT * FROM datavalues WHERE SiteID = {} AND VariableID = {}".format(site_id,
var_id)
db_df = get_db_table_as_df(table, sql)
else:
db_df = get_db_table_as_df(table)
if not db_df.empty:
if table == 'datavalues':
df.reset_index(inplace=True)
db_df.reset_index(inplace=True)
merged = df.merge(db_df,
how='outer',
on=check_col,
indicator=True)
non_duplicated = merged[merged._merge == 'left_only']
filter_cols = [col for col in list(non_duplicated) if "_y" not in col and "_m" not in col]
non_duplicated = non_duplicated[filter_cols]
cols_clean = [col.replace('_x', '') for col in list(non_duplicated)]
non_duplicated.columns = cols_clean
non_duplicated = non_duplicated[df.columns]
non_duplicated.to_sql(table, con, if_exists='append', index=False)
return df
else:
index = True if table == 'datavalues' else False
df.to_sql(table, con, if_exists='append', index=index)
return df
def get_db_table_as_df(name, sql="""SELECT * FROM {};""", date_col=None, dbfilename=hr_db_filename):
con = sqlite3.connect(dbfilename)
sql = sql.format(name)
if name == 'datavalues':
date_col = 'Datetime'
df = pd.read_sql(sql, con, parse_dates=date_col)
if name == 'datavalues':
df = make_date_index(df, 'Datetime')
return df
def make_date_index(df, field, fmt=None):
df.loc[:, field] = pd.to_datetime(df.loc[:, field], format=fmt)
df.set_index(field, drop=True, inplace=True)
return df
def get_table_for_variable_code(variable_code, site_id=None, start_date=None, end_date=None):
var_id = get_id('Variable', {'VariableCode': variable_code})
table_name = 'datavalues'
sql = """SELECT * FROM {} WHERE VariableID={};""".format(table_name, var_id)
if start_date or end_date:
if not start_date:
start_date = '1900-01-01'
elif not end_date:
end_date = '2100-01-01'
sql = """SELECT * FROM {} WHERE VariableID={} AND Datetime BETWEEN '{}' AND '{}';""".format(
table_name,
var_id,
start_date,
end_date
)
df = get_db_table_as_df(table_name, sql=sql)
df = df.sort_index()
if site_id:
df = df[df['SiteID'] == site_id]
return df
def get_code_from_id(typ, id):
"""
:param typ: string 'Variable' or 'Site'
:param id: int
:return:
"""
table_name = '{}s'.format(typ.lower())
table = get_db_table_as_df(table_name)
code_name = '{}Code'.format(typ)
id_name = '{}ID'.format(typ)
return table[table[id_name] == id][code_name].values[0]
def parse_wml2_data(wml2url, src_org):
"""
parses wml2 data into pandas dataframe and adds the data, including the site and variable, into
the database if not already in there
:param wml2url: String. the service response in wml2 format
:param src_org: String. the organization e.g. "USGS"
:return: dataframe of the time series
"""
con = sqlite3.connect(hr_db_filename)
soup = get_server_data(wml2url)
res_list = []
site_data = get_site_data(soup, src_org)
site_id = get_id('Site', site_data)
variable_block = soup.find_all('wml2:observationmember')
for v in variable_block:
value_tags_list = v.find_all('wml2:point')
variable_data = get_variable_data(v)
variable_id = get_id('Variable', variable_data)
for value_tag in value_tags_list:
datetime = value_tag.find('wml2:time').text
val = value_tag.find('wml2:value').text
res = {'VariableID': variable_id,
'SiteID': site_id,
'Value': val,
'Datetime': datetime,
}
res_list.append(res)
df = pd.DataFrame(res_list)
df['Value'] = | pd.to_numeric(df['Value']) | pandas.to_numeric |
"""
Experimental manager based on storing a collection of 1D arrays
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Callable,
TypeVar,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
from pandas._typing import (
ArrayLike,
Hashable,
)
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
astype_array_safe,
infer_dtype_from_scalar,
soft_convert_objects,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_datetime64_ns_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_object_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
PandasDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.inference import is_inferred_bool_dtype
from pandas.core.dtypes.missing import (
array_equals,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.array_algos.quantile import quantile_compat
from pandas.core.array_algos.take import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PandasArray,
TimedeltaArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
sanitize_array,
)
from pandas.core.indexers import (
maybe_convert_indices,
validate_indices,
)
from pandas.core.indexes.api import (
Index,
ensure_index,
)
from pandas.core.internals.base import (
DataManager,
SingleDataManager,
interleaved_dtype,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
external_values,
new_block,
to_native_types,
)
if TYPE_CHECKING:
from pandas import Float64Index
T = TypeVar("T", bound="ArrayManager")
class ArrayManager(DataManager):
"""
Core internal data structure to implement DataFrame and Series.
Alternative to the BlockManager, storing a list of 1D arrays instead of
Blocks.
This is *not* a public API class
Parameters
----------
arrays : Sequence of arrays
axes : Sequence of Index
verify_integrity : bool, default True
"""
__slots__ = [
"_axes", # private attribute, because 'axes' has different order, see below
"arrays",
]
arrays: list[np.ndarray | ExtensionArray]
_axes: list[Index]
def __init__(
self,
arrays: list[np.ndarray | ExtensionArray],
axes: list[Index],
verify_integrity: bool = True,
):
# Note: we are storing the axes in "_axes" in the (row, columns) order
# which contrasts the order how it is stored in BlockManager
self._axes = axes
self.arrays = arrays
if verify_integrity:
self._axes = [ensure_index(ax) for ax in axes]
self.arrays = [ensure_wrapped_if_datetimelike(arr) for arr in arrays]
self._verify_integrity()
def make_empty(self: T, axes=None) -> T:
"""Return an empty ArrayManager with the items axis of len 0 (no columns)"""
if axes is None:
axes = [self.axes[1:], Index([])]
arrays: list[np.ndarray | ExtensionArray] = []
return type(self)(arrays, axes)
@property
def items(self) -> Index:
return self._axes[-1]
@property
# error: Signature of "axes" incompatible with supertype "DataManager"
def axes(self) -> list[Index]: # type: ignore[override]
# mypy doesn't work to override attribute with property
# see https://github.com/python/mypy/issues/4125
"""Axes is BlockManager-compatible order (columns, rows)"""
return [self._axes[1], self._axes[0]]
@property
def shape_proper(self) -> tuple[int, ...]:
# this returns (n_rows, n_columns)
return tuple(len(ax) for ax in self._axes)
@staticmethod
def _normalize_axis(axis: int) -> int:
# switch axis
axis = 1 if axis == 0 else 0
return axis
def set_axis(
self, axis: int, new_labels: Index, verify_integrity: bool = True
) -> None:
# Caller is responsible for ensuring we have an Index object.
axis = self._normalize_axis(axis)
if verify_integrity:
old_len = len(self._axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, new "
f"values have {new_len} elements"
)
self._axes[axis] = new_labels
def consolidate(self) -> ArrayManager:
return self
def is_consolidated(self) -> bool:
return True
def _consolidate_inplace(self) -> None:
pass
def get_dtypes(self):
return np.array([arr.dtype for arr in self.arrays], dtype="object")
# TODO setstate getstate
def __repr__(self) -> str:
output = type(self).__name__
output += f"\nIndex: {self._axes[0]}"
if self.ndim == 2:
output += f"\nColumns: {self._axes[1]}"
output += f"\n{len(self.arrays)} arrays:"
for arr in self.arrays:
output += f"\n{arr.dtype}"
return output
def _verify_integrity(self) -> None:
n_rows, n_columns = self.shape_proper
if not len(self.arrays) == n_columns:
raise ValueError(
"Number of passed arrays must equal the size of the column Index: "
f"{len(self.arrays)} arrays vs {n_columns} columns."
)
for arr in self.arrays:
if not len(arr) == n_rows:
raise ValueError(
"Passed arrays should have the same length as the rows Index: "
f"{len(arr)} vs {n_rows} rows"
)
if not isinstance(arr, (np.ndarray, ExtensionArray)):
raise ValueError(
"Passed arrays should be np.ndarray or ExtensionArray instances, "
f"got {type(arr)} instead"
)
if not arr.ndim == 1:
raise ValueError(
"Passed arrays should be 1-dimensional, got array with "
f"{arr.ndim} dimensions instead."
)
def reduce(
self: T, func: Callable, ignore_failures: bool = False
) -> tuple[T, np.ndarray]:
"""
Apply reduction function column-wise, returning a single-row ArrayManager.
Parameters
----------
func : reduction function
ignore_failures : bool, default False
Whether to drop columns where func raises TypeError.
Returns
-------
ArrayManager
np.ndarray
Indexer of column indices that are retained.
"""
result_arrays: list[np.ndarray] = []
result_indices: list[int] = []
for i, arr in enumerate(self.arrays):
try:
res = func(arr, axis=0)
except TypeError:
if not ignore_failures:
raise
else:
# TODO NaT doesn't preserve dtype, so we need to ensure to create
# a timedelta result array if original was timedelta
# what if datetime results in timedelta? (eg std)
if res is NaT and is_timedelta64_ns_dtype(arr.dtype):
result_arrays.append(np.array(["NaT"], dtype="timedelta64[ns]"))
else:
# error: Argument 1 to "append" of "list" has incompatible type
# "ExtensionArray"; expected "ndarray"
result_arrays.append(
sanitize_array([res], None) # type: ignore[arg-type]
)
result_indices.append(i)
index = Index._simple_new(np.array([None], dtype=object)) # placeholder
if ignore_failures:
indexer = np.array(result_indices)
columns = self.items[result_indices]
else:
indexer = np.arange(self.shape[0])
columns = self.items
# error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
# expected "List[Union[ndarray, ExtensionArray]]"
new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type]
return new_mgr, indexer
def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T:
"""
Apply grouped reduction function columnwise, returning a new ArrayManager.
Parameters
----------
func : grouped reduction function
ignore_failures : bool, default False
Whether to drop columns where func raises TypeError.
Returns
-------
ArrayManager
"""
result_arrays: list[np.ndarray] = []
result_indices: list[int] = []
for i, arr in enumerate(self.arrays):
try:
res = func(arr)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
continue
result_arrays.append(res)
result_indices.append(i)
if len(result_arrays) == 0:
index = Index([None]) # placeholder
else:
index = Index(range(result_arrays[0].shape[0]))
if ignore_failures:
columns = self.items[np.array(result_indices, dtype="int64")]
else:
columns = self.items
# error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
# expected "List[Union[ndarray, ExtensionArray]]"
return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type]
def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:
"""
Apply array_op blockwise with another (aligned) BlockManager.
"""
# TODO what if `other` is BlockManager ?
left_arrays = self.arrays
right_arrays = other.arrays
result_arrays = [
array_op(left, right) for left, right in zip(left_arrays, right_arrays)
]
return type(self)(result_arrays, self._axes)
def apply(
self: T,
f,
align_keys: list[str] | None = None,
ignore_failures: bool = False,
**kwargs,
) -> T:
"""
Iterate over the arrays, collect and create a new ArrayManager.
Parameters
----------
f : str or callable
Name of the Array method to apply.
align_keys: List[str] or None, default None
ignore_failures: bool, default False
**kwargs
Keywords to pass to `f`
Returns
-------
ArrayManager
"""
assert "filter" not in kwargs
align_keys = align_keys or []
result_arrays: list[np.ndarray] = []
result_indices: list[int] = []
# fillna: Series/DataFrame is responsible for making sure value is aligned
aligned_args = {k: kwargs[k] for k in align_keys}
if f == "apply":
f = kwargs.pop("func")
for i, arr in enumerate(self.arrays):
if aligned_args:
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
# The caller is responsible for ensuring that
# obj.axes[-1].equals(self.items)
if obj.ndim == 1:
kwargs[k] = obj.iloc[i]
else:
kwargs[k] = obj.iloc[:, i]._values
else:
# otherwise we have an array-like
kwargs[k] = obj[i]
try:
if callable(f):
applied = f(arr, **kwargs)
else:
applied = getattr(arr, f)(**kwargs)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
continue
# if not isinstance(applied, ExtensionArray):
# # TODO not all EA operations return new EAs (eg astype)
# applied = array(applied)
result_arrays.append(applied)
result_indices.append(i)
new_axes: list[Index]
if ignore_failures:
# TODO copy?
new_axes = [self._axes[0], self._axes[1][result_indices]]
else:
new_axes = self._axes
if len(result_arrays) == 0:
return self.make_empty(new_axes)
# error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
# expected "List[Union[ndarray, ExtensionArray]]"
return type(self)(result_arrays, new_axes) # type: ignore[arg-type]
def apply_2d(self: T, f, ignore_failures: bool = False, **kwargs) -> T:
"""
Variant of `apply`, but where the function should not be applied to
each column independently, but to the full data as a 2D array.
"""
values = self.as_array()
try:
result = f(values, **kwargs)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
result_arrays = []
new_axes = [self._axes[0], self.axes[1].take([])]
else:
result_arrays = [result[:, i] for i in range(len(self._axes[1]))]
new_axes = self._axes
return type(self)(result_arrays, new_axes)
def apply_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T:
# switch axis to follow BlockManager logic
if swap_axis and "axis" in kwargs and self.ndim == 2:
kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0
align_keys = align_keys or []
aligned_args = {k: kwargs[k] for k in align_keys}
result_arrays = []
for i, arr in enumerate(self.arrays):
if aligned_args:
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
# The caller is responsible for ensuring that
# obj.axes[-1].equals(self.items)
if obj.ndim == 1:
if self.ndim == 2:
kwargs[k] = obj.iloc[slice(i, i + 1)]._values
else:
kwargs[k] = obj.iloc[:]._values
else:
kwargs[k] = obj.iloc[:, [i]]._values
else:
# otherwise we have an ndarray
if obj.ndim == 2:
kwargs[k] = obj[[i]]
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "tz"
if hasattr(arr, "tz") and arr.tz is None: # type: ignore[union-attr]
# DatetimeArray needs to be converted to ndarray for DatetimeLikeBlock
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "_data"
arr = arr._data # type: ignore[union-attr]
elif arr.dtype.kind == "m" and not isinstance(arr, np.ndarray):
# TimedeltaArray needs to be converted to ndarray for TimedeltaBlock
# error: "ExtensionArray" has no attribute "_data"
arr = arr._data # type: ignore[attr-defined]
if self.ndim == 2:
arr = ensure_block_shape(arr, 2)
block = new_block(arr, placement=slice(0, 1, 1), ndim=2)
else:
block = new_block(arr, placement=slice(0, len(self), 1), ndim=1)
applied = getattr(block, f)(**kwargs)
if isinstance(applied, list):
applied = applied[0]
arr = applied.values
if self.ndim == 2 and arr.ndim == 2:
# 2D for np.ndarray or DatetimeArray/TimedeltaArray
assert len(arr) == 1
# error: Invalid index type "Tuple[int, slice]" for
# "Union[ndarray, ExtensionArray]"; expected type
# "Union[int, slice, ndarray]"
arr = arr[0, :] # type: ignore[index]
result_arrays.append(arr)
return type(self)(result_arrays, self._axes)
def quantile(
self,
*,
qs: Float64Index,
axis: int = 0,
transposed: bool = False,
interpolation="linear",
) -> ArrayManager:
arrs = [ensure_block_shape(x, 2) for x in self.arrays]
assert axis == 1
new_arrs = [
quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs
]
for i, arr in enumerate(new_arrs):
if arr.ndim == 2:
assert arr.shape[0] == 1, arr.shape
new_arrs[i] = arr[0]
axes = [qs, self._axes[1]]
return type(self)(new_arrs, axes)
def where(self, other, cond, align: bool, errors: str) -> ArrayManager:
if align:
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
other = extract_array(other, extract_numpy=True)
return self.apply_with_block(
"where",
align_keys=align_keys,
other=other,
cond=cond,
errors=errors,
)
# TODO what is this used for?
# def setitem(self, indexer, value) -> ArrayManager:
# return self.apply_with_block("setitem", indexer=indexer, value=value)
def putmask(self, mask, new, align: bool = True):
if align:
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
new = extract_array(new, extract_numpy=True)
return self.apply_with_block(
"putmask",
align_keys=align_keys,
mask=mask,
new=new,
)
def diff(self, n: int, axis: int) -> ArrayManager:
if axis == 1:
# DataFrame only calls this for n=0, in which case performing it
# with axis=0 is equivalent
assert n == 0
axis = 0
return self.apply(algos.diff, n=n, axis=axis, stacklevel=5)
def interpolate(self, **kwargs) -> ArrayManager:
return self.apply_with_block("interpolate", swap_axis=False, **kwargs)
def shift(self, periods: int, axis: int, fill_value) -> ArrayManager:
if fill_value is lib.no_default:
fill_value = None
if axis == 1 and self.ndim == 2:
# TODO column-wise shift
raise NotImplementedError
return self.apply_with_block(
"shift", periods=periods, axis=axis, fill_value=fill_value
)
def fillna(self, value, limit, inplace: bool, downcast) -> ArrayManager:
return self.apply_with_block(
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
)
def downcast(self) -> ArrayManager:
return self.apply_with_block("downcast")
def astype(self, dtype, copy: bool = False, errors: str = "raise") -> ArrayManager:
return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors)
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
) -> ArrayManager:
def _convert(arr):
if is_object_dtype(arr.dtype):
return soft_convert_objects(
arr,
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
copy=copy,
)
else:
return arr.copy() if copy else arr
return self.apply(_convert)
def replace(self, value, **kwargs) -> ArrayManager:
assert np.ndim(value) == 0, value
# TODO "replace" is right now implemented on the blocks, we should move
# it to general array algos so it can be reused here
return self.apply_with_block("replace", value=value, **kwargs)
def replace_list(
self: T,
src_list: list[Any],
dest_list: list[Any],
inplace: bool = False,
regex: bool = False,
) -> T:
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
return self.apply_with_block(
"_replace_list",
src_list=src_list,
dest_list=dest_list,
inplace=inplace,
regex=regex,
)
def to_native_types(self, **kwargs):
return self.apply(to_native_types, **kwargs)
@property
def is_mixed_type(self) -> bool:
return True
@property
def is_numeric_mixed_type(self) -> bool:
return all(is_numeric_dtype(t) for t in self.get_dtypes())
@property
def any_extension_types(self) -> bool:
"""Whether any of the blocks in this manager are extension blocks"""
return False # any(block.is_extension for block in self.blocks)
@property
def is_view(self) -> bool:
""" return a boolean if we are a single block and are a view """
# TODO what is this used for?
return False
@property
def is_single_block(self) -> bool:
return False
def _get_data_subset(self, predicate: Callable) -> ArrayManager:
indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)]
arrays = [self.arrays[i] for i in indices]
# TODO copy?
new_axes = [self._axes[0], self._axes[1][np.array(indices, dtype="int64")]]
return type(self)(arrays, new_axes, verify_integrity=False)
def get_bool_data(self, copy: bool = False) -> ArrayManager:
"""
Select columns that are bool-dtype and object-dtype columns that are all-bool.
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
return self._get_data_subset(is_inferred_bool_dtype)
def get_numeric_data(self, copy: bool = False) -> ArrayManager:
"""
Select columns that have a numeric dtype.
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
return self._get_data_subset(
lambda arr: is_numeric_dtype(arr.dtype)
or getattr(arr.dtype, "_is_numeric", False)
)
def copy(self: T, deep=True) -> T:
"""
Make deep or shallow copy of ArrayManager
Parameters
----------
deep : bool or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
def copy_func(ax):
return ax.copy(deep=True) if deep == "all" else ax.view()
new_axes = [copy_func(ax) for ax in self._axes]
else:
new_axes = list(self._axes)
if deep:
new_arrays = [arr.copy() for arr in self.arrays]
else:
new_arrays = self.arrays
return type(self)(new_arrays, new_axes)
def as_array(
self,
transpose: bool = False,
dtype=None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
Convert the blockmanager data into an numpy array.
Parameters
----------
transpose : bool, default False
If True, transpose the return array.
dtype : object, default None
Data type of the return array.
copy : bool, default False
If True then guarantee that a copy is returned. A value of
False does not guarantee that the underlying data is not
copied.
na_value : object, default lib.no_default
Value to be used as the missing value sentinel.
Returns
-------
arr : ndarray
"""
if len(self.arrays) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
# We want to copy when na_value is provided to avoid
# mutating the original object
copy = copy or na_value is not lib.no_default
if not dtype:
dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
elif isinstance(dtype, PandasDtype):
dtype = dtype.numpy_dtype
elif is_extension_array_dtype(dtype):
dtype = "object"
elif is_dtype_equal(dtype, str):
dtype = "object"
result = np.empty(self.shape_proper, dtype=dtype)
# error: Incompatible types in assignment (expression has type "Union[ndarray,
# ExtensionArray]", variable has type "ndarray")
for i, arr in enumerate(self.arrays): # type: ignore[assignment]
arr = arr.astype(dtype, copy=copy)
result[:, i] = arr
if na_value is not lib.no_default:
result[isna(result)] = na_value
return result
# return arr.transpose() if transpose else arr
def get_slice(self, slobj: slice, axis: int = 0) -> ArrayManager:
axis = self._normalize_axis(axis)
if axis == 0:
arrays = [arr[slobj] for arr in self.arrays]
elif axis == 1:
arrays = self.arrays[slobj]
new_axes = list(self._axes)
new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
return type(self)(arrays, new_axes, verify_integrity=False)
def fast_xs(self, loc: int) -> ArrayLike:
"""
Return the array corresponding to `frame.iloc[loc]`.
Parameters
----------
loc : int
Returns
-------
np.ndarray or ExtensionArray
"""
dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
values = [arr[loc] for arr in self.arrays]
if isinstance(dtype, ExtensionDtype):
result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)
# for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT
elif is_datetime64_ns_dtype(dtype):
result = DatetimeArray._from_sequence(values, dtype=dtype)._data
elif is_timedelta64_ns_dtype(dtype):
result = TimedeltaArray._from_sequence(values, dtype=dtype)._data
else:
result = np.array(values, dtype=dtype)
return result
def iget(self, i: int) -> SingleArrayManager:
"""
Return the data as a SingleArrayManager.
"""
values = self.arrays[i]
return SingleArrayManager([values], [self._axes[0]])
def iget_values(self, i: int) -> ArrayLike:
"""
Return the data for column i as the values (ndarray or ExtensionArray).
"""
return self.arrays[i]
def idelete(self, indexer):
"""
Delete selected locations in-place (new block and array, same BlockManager)
"""
to_keep = np.ones(self.shape[0], dtype=np.bool_)
to_keep[indexer] = False
self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]]
self._axes = [self._axes[0], self._axes[1][to_keep]]
return self
def iset(self, loc: int | slice | np.ndarray, value: ArrayLike):
"""
Set new column(s).
This changes the ArrayManager in-place, but replaces (an) existing
column(s), not changing column values in-place).
Parameters
----------
loc : integer, slice or boolean mask
Positional location (already bounds checked)
value : np.ndarray or ExtensionArray
"""
# single column -> single integer index
if lib.is_integer(loc):
# TODO can we avoid needing to unpack this here? That means converting
# DataFrame into 1D array when loc is an integer
if isinstance(value, np.ndarray) and value.ndim == 2:
assert value.shape[1] == 1
value = value[:, 0]
# TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item
# but we should avoid that and pass directly the proper array
value = ensure_wrapped_if_datetimelike(value)
assert isinstance(value, (np.ndarray, ExtensionArray))
assert value.ndim == 1
assert len(value) == len(self._axes[0])
# error: Invalid index type "Union[int, slice, ndarray]" for
# "List[Union[ndarray, ExtensionArray]]"; expected type "int"
self.arrays[loc] = value # type: ignore[index]
return
# multiple columns -> convert slice or array to integer indices
elif isinstance(loc, slice):
indices = range(
loc.start if loc.start is not None else 0,
loc.stop if loc.stop is not None else self.shape_proper[1],
loc.step if loc.step is not None else 1,
)
else:
assert isinstance(loc, np.ndarray)
assert loc.dtype == "bool"
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "range")
indices = np.nonzero(loc)[0] # type: ignore[assignment]
assert value.ndim == 2
assert value.shape[0] == len(self._axes[0])
for value_idx, mgr_idx in enumerate(indices):
# error: Invalid index type "Tuple[slice, int]" for
# "Union[ExtensionArray, ndarray]"; expected type
# "Union[int, slice, ndarray]"
value_arr = value[:, value_idx] # type: ignore[index]
self.arrays[mgr_idx] = value_arr
return
def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : np.ndarray or ExtensionArray
"""
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
value = extract_array(value, extract_numpy=True)
if value.ndim == 2:
if value.shape[0] == 1:
# error: Invalid index type "Tuple[int, slice]" for
# "Union[Any, ExtensionArray, ndarray]"; expected type
# "Union[int, slice, ndarray]"
value = value[0, :] # type: ignore[index]
else:
raise ValueError(
f"Expected a 1D array, got an array with shape {value.shape}"
)
value = ensure_wrapped_if_datetimelike(value)
# TODO self.arrays can be empty
# assert len(value) == len(self.arrays[0])
# TODO is this copy needed?
arrays = self.arrays.copy()
arrays.insert(loc, value)
self.arrays = arrays
self._axes[1] = new_axis
def reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
# ignored keywords
consolidate: bool = True,
only_slice: bool = False,
) -> T:
axis = self._normalize_axis(axis)
return self._reindex_indexer(
new_axis, indexer, axis, fill_value, allow_dups, copy
)
def _reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
) -> T:
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object, default None
allow_dups : bool, default False
copy : bool, default True
pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self._axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result._axes = list(self._axes)
result._axes[axis] = new_axis
return result
# some axes don't allow reindexing with dups
if not allow_dups:
self._axes[axis]._validate_can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 1:
new_arrays = []
for i in indexer:
if i == -1:
arr = self._make_na_array(fill_value=fill_value)
else:
arr = self.arrays[i]
new_arrays.append(arr)
else:
validate_indices(indexer, len(self._axes[0]))
indexer = ensure_int64(indexer)
if (indexer == -1).any():
allow_fill = True
else:
allow_fill = False
new_arrays = [
take_1d(
arr,
indexer,
allow_fill=allow_fill,
fill_value=fill_value,
# if fill_value is not None else blk.fill_value
)
for arr in self.arrays
]
new_axes = list(self._axes)
new_axes[axis] = new_axis
return type(self)(new_arrays, new_axes, verify_integrity=False)
def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T:
"""
Take items along any axis.
"""
axis = self._normalize_axis(axis)
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
if not indexer.ndim == 1:
raise ValueError("indexer should be 1-dimensional")
n = self.shape_proper[axis]
indexer = maybe_convert_indices(indexer, n, verify=verify)
new_labels = self._axes[axis].take(indexer)
return self._reindex_indexer(
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
def _make_na_array(self, fill_value=None):
if fill_value is None:
fill_value = np.nan
dtype, fill_value = infer_dtype_from_scalar(fill_value)
# error: Argument "dtype" to "empty" has incompatible type "Union[dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
values = np.empty(self.shape_proper[0], dtype=dtype) # type: ignore[arg-type]
values.fill(fill_value)
return values
def _equal_values(self, other) -> bool:
"""
Used in .equals defined in base class. Only check the column values
assuming shape and indexes have already been checked.
"""
for left, right in zip(self.arrays, other.arrays):
if not array_equals(left, right):
return False
else:
return True
def unstack(self, unstacker, fill_value) -> ArrayManager:
"""
Return a BlockManager with all blocks unstacked..
Parameters
----------
unstacker : reshape._Unstacker
fill_value : Any
fill_value for newly introduced missing values.
Returns
-------
unstacked : BlockManager
"""
indexer, _ = unstacker._indexer_and_to_sort
if unstacker.mask.all():
new_indexer = indexer
allow_fill = False
else:
new_indexer = np.full(unstacker.mask.shape, -1)
new_indexer[unstacker.mask] = indexer
allow_fill = True
new_indexer2D = new_indexer.reshape(*unstacker.full_shape)
new_indexer2D = ensure_int64(new_indexer2D)
new_arrays = []
for arr in self.arrays:
for i in range(unstacker.full_shape[1]):
new_arr = take_1d(
arr,
new_indexer2D[:, i],
allow_fill=allow_fill,
fill_value=fill_value,
)
new_arrays.append(new_arr)
new_index = unstacker.new_index
new_columns = unstacker.get_new_columns(self._axes[1])
new_axes = [new_index, new_columns]
return type(self)(new_arrays, new_axes, verify_integrity=False)
# TODO
# equals
# to_dict
class SingleArrayManager(ArrayManager, SingleDataManager):
__slots__ = [
"_axes", # private attribute, because 'axes' has different order, see below
"arrays",
]
arrays: list[np.ndarray | ExtensionArray]
_axes: list[Index]
ndim = 1
def __init__(
self,
arrays: list[np.ndarray | ExtensionArray],
axes: list[Index],
verify_integrity: bool = True,
):
self._axes = axes
self.arrays = arrays
if verify_integrity:
assert len(axes) == 1
assert len(arrays) == 1
self._axes = [ensure_index(ax) for ax in self._axes]
arr = arrays[0]
arr = ensure_wrapped_if_datetimelike(arr)
if isinstance(arr, ABCPandasArray):
arr = arr.to_numpy()
self.arrays = [arr]
self._verify_integrity()
def _verify_integrity(self) -> None:
(n_rows,) = self.shape
assert len(self.arrays) == 1
arr = self.arrays[0]
assert len(arr) == n_rows
if not arr.ndim == 1:
raise ValueError(
"Passed array should be 1-dimensional, got array with "
f"{arr.ndim} dimensions instead."
)
@staticmethod
def _normalize_axis(axis):
return axis
def make_empty(self, axes=None) -> SingleArrayManager:
"""Return an empty ArrayManager with index/array of length 0"""
if axes is None:
axes = [Index([], dtype=object)]
array = np.array([], dtype=self.dtype)
return type(self)([array], axes)
@classmethod
def from_array(cls, array, index):
return cls([array], [index])
@property
def axes(self):
return self._axes
@property
def index(self) -> Index:
return self._axes[0]
@property
def dtype(self):
return self.array.dtype
def external_values(self):
"""The array that Series.values returns"""
        return external_values(self.array)
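# Illustrative usage sketch (not part of this module): the ArrayManager above is an
# experimental alternative to the default BlockManager. Assuming a pandas build where
# the "mode.data_manager" option exists (roughly pandas 1.3-1.5), it can be exercised as:
#
#     import pandas as pd
#     pd.set_option("mode.data_manager", "array")
#     df = pd.DataFrame({"a": [1, 2, 3], "b": [1.5, 2.5, 3.5]})
#     type(df._mgr)      # ArrayManager: one 1D array per column
#     df._mgr.arrays     # [array([1, 2, 3]), array([1.5, 2.5, 3.5])]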
import glob
import sys
from pprint import pprint
import pandas as pd
import os
import dateutil
import json
import numpy as np
from datetime import timedelta
SETTLEMENT_DATE = 'Settlement Date'
ACCOUNT_TYPE = 'Account Type'
RRSP_ACCOUNT_TYPE = 'Individual RRSP'
TFSA_ACCOUNT_TYPE = 'Individual TFSA'
ACTIVITY_TYPE = 'Activity Type'
WITHDRAWALS = 'Withdrawals'
DEPOSIT = 'Deposits'
INTEREST = 'Interest'
TRANSFER = 'Transfers'
CAD_GROSS = 'CAD Gross'
CAD_COMMISSION = 'CAD Commission'
QUANTITY = 'Quantity'
ACTION = 'Action'
CONTRIBUTION = 'CON'
EXCHANGE_RATE = 'CAD2USD'
NORMALIZED_SYMBOL = 'Normalized Symbol'
ACCOUNT_NUMBER = 'Account #'
def _extract_cad_amount(row):
if row['Net Amount'] != 0:
amount = row['Net Amount']
else:
description = row['Description']
start_tag = 'BOOK VALUE $'
try:
sign = np.sign(row['Quantity'])
amount = float(description[description.find(start_tag) + len(start_tag):].replace(',', '')) * sign
except ValueError as e:
pprint(row.to_dict())
raise ValueError(description) from e
if row['Currency'] == 'USD':
amount *= row[EXCHANGE_RATE]
return amount
symbol_map = {
'8200999': 'Interest',
'DLR': 'DLR.TO',
'H038778': 'DLR.TO',
'H062990': 'QQQ',
'VCN': 'VCN.TO',
'VGRO': 'VGRO.TO',
'V007563': 'VOO',
'XAW': 'XAW.TO',
'ZAG': 'ZAG.TO',
np.nan: ''
}
normalized_symbol = {
'DLR.TO', 'QQQ', 'VCN.TO', 'VEQT.TO', 'VGRO.TO', 'VOO', 'XAW.TO', 'ZAG.TO', None
}
def _normalize_symbol(symbol):
if symbol in normalized_symbol:
return symbol
assert symbol in symbol_map, f'Please add {symbol} into symbol_map'
return symbol_map[symbol]
def _fill_gaps(days, rates):
new_days = []
new_rates = []
for day, rate in zip(days, rates):
if len(new_days) > 0:
last_date = new_days[-1]
gap = (day - last_date).days
if gap > 1:
avg_rate = (new_rates[-1] + rate) / 2
for num_days in range(1, gap):
new_days.append(last_date + timedelta(days=num_days))
new_rates.append(avg_rate)
else:
assert gap == 1
new_days.append(day)
new_rates.append(rate)
return new_days, new_rates
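# Small self-check sketch for _fill_gaps (synthetic numbers, not real Bank of Canada
# data): a weekend gap between two quotes is filled with the average of its neighbours.
def _demo_fill_gaps():
    from datetime import date
    days = [date(2021, 1, 1), date(2021, 1, 4)]
    rates = [1.27, 1.29]
    # returns Jan 1-4, with Jan 2 and Jan 3 filled at (1.27 + 1.29) / 2 = 1.28
    return _fill_gaps(days, rates)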
def read_bank_of_canada_exchange_rate(exchange_rate_file):
exchange_rate = pd.read_csv(exchange_rate_file)
exchange_rate['date'] = exchange_rate['date'].apply(lambda x: dateutil.parser.parse(x).date())
days, rates = _fill_gaps(exchange_rate['date'], exchange_rate['FXUSDCAD'])
return pd.DataFrame(
data={
SETTLEMENT_DATE: days,
EXCHANGE_RATE: rates
})
def _compute_acb(quantities, amounts, commissions):
avg_share_price = []
capital_gains = []
acb = []
total_shares = 0
total_acb = 0
for num_shares, amount, commission in zip(quantities, amounts, commissions):
assert commission <= 0
# either buy or sell
if not (num_shares > 0 > amount) and not (amount > 0 > num_shares):
print(
f'num_shares={num_shares} and amount={amount} are not compatible, is this a transfer or an error?',
file=sys.stderr
)
amount *= -1
if num_shares > 0:
total_shares += num_shares
total_acb -= amount + commission
capital_gains.append(0)
acb.append(0)
elif num_shares < 0:
value = num_shares * total_acb / total_shares
total_acb += value
total_shares += num_shares
acb.append(-(value + commission))
capital_gains.append(amount + value + commission)
else:
raise ValueError()
if total_shares > 0:
avg_share_price.append(total_acb / total_shares)
else:
avg_share_price.append(0)
return avg_share_price, capital_gains, acb
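# Worked example for _compute_acb (hypothetical trades, using the sign conventions of
# the Questrade export above: buys have positive quantity and negative cash amount,
# commissions are <= 0):
#   buy 100 @ $10, $5 commission  -> quantity  100, amount -1000, commission -5
#   sell  50 @ $12, $5 commission -> quantity  -50, amount   600, commission -5
def _demo_compute_acb():
    avg_price, gains, acb = _compute_acb(
        quantities=[100, -50], amounts=[-1000, 600], commissions=[-5, -5]
    )
    # avg_price -> [10.05, 10.05]  (ACB per share is unchanged by a sale)
    # acb       -> [0, 507.5]      (cost base attributed to the 50 shares sold)
    # gains     -> [0, 92.5]       (600 - 502.5 - 5)
    return avg_price, gains, acb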
class Questrade:
@staticmethod
def from_files(data_dir, exchange_rate_file):
transactions = None
for fpath in glob.glob(f'{data_dir}/*.xlsx'):
            activities = pd.read_excel(fpath)
"""
Parse FGDC metadata
"""
import re
from pathlib import Path
import geopandas as gpd
import pandas as pd
from bs4 import BeautifulSoup
from shapely.geometry import box
def parse_xml(xml, fields):
soup = BeautifulSoup(xml)
# Field names must be unique within the FGDC metadata
data = {}
for field in fields:
xml_field = soup.find(field)
data[field] = xml_field and xml_field.text
return data
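# Minimal illustration of parse_xml with a hand-written FGDC-style fragment
# (hypothetical values); fields missing from the XML come back as None.
def _demo_parse_xml():
    xml = "<metadata><westbc>-120.5</westbc><eastbc>-119.5</eastbc></metadata>"
    return parse_xml(xml, fields=["westbc", "eastbc", "northbc"])
    # -> {'westbc': '-120.5', 'eastbc': '-119.5', 'northbc': None}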
def parse_meta(meta_dir):
meta_dir = Path(meta_dir)
xy_regex = re.compile(r'x(\d{2})y(\d{3})')
data = []
i = 0
for meta_file in meta_dir.glob('**/*.xml'):
name = meta_file.stem
x, y = xy_regex.search(name).groups()
project_dir = meta_file.parents[1].name
with meta_file.open() as f:
d = parse_xml(
f.read(),
fields=['utmzone', 'westbc', 'eastbc', 'northbc', 'southbc'])
d['name'] = name
d['x'] = x
d['y'] = y
d['project_dir'] = project_dir
data.append(d)
i += 1
if i % 1000 == 0:
print(i)
return pd.DataFrame(data)
def meta_to_wgs84(df):
"""Convert meta file with multiple UTM zones to WGS84
TODO check that projected position is near WGS bounds from metadata file. At
    least a couple occurrences of elevation in the ocean. Probably should be utm
11 instead of 10.
"""
    df['x'] = pd.to_numeric(df['x'])
# ---
# jupyter:
# jupytext:
# notebook_metadata_filter: all,-language_info,-toc,-latex_envs
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.1-dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# (landsat1)=
# # Landsat image processing 1
# %% [markdown]
# https://medium.com/@mommermiscience/dealing-with-geospatial-raster-data-in-python-with-rasterio-775e5ba0c9f5
#
# https://www.perrygeo.com/python-affine-transforms.html
#
# http://geologyandpython.com/get-landsat-8.html
# %% [markdown]
# ## Bulk image download from AWS
#
# Notes drawing on http://geologyandpython.com/get-landsat-8.html
# %%
import pandas as pd
import a301_lib
import datetime as dt
import dateutil.parser
import numpy as np
from pathlib import Path
# %%
# !pwd
# %%
download_catalog=True
if download_catalog:
    s3_scenes = pd.read_csv('http://landsat-pds.s3.amazonaws.com/c1/L8/scene_list.gz', compression='gzip')
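# %% [markdown]
# A possible next step (sketch only): narrow the bulk scene list to one WRS-2
# path/row and pick the least cloudy acquisition. The column names used here
# (`path`, `row`, `cloudCover`, `download_url`) are assumptions about the AWS
# `scene_list` schema and should be checked against `s3_scenes.columns`.

# %%
if download_catalog:
    candidate_scenes = s3_scenes[(s3_scenes.path == 47) & (s3_scenes.row == 26)]
    best_scene = candidate_scenes.sort_values('cloudCover').iloc[0]
    print(best_scene.download_url)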
"""Profile Settings Page."""
import dash_html_components as html
import dash_table
import pandas as pd
from dash.dependencies import Input, Output
from dash_charts import appUtils
from icecream import ic
from .plaidWrapper import PlaidDashWrapper
class TabProfile(appUtils.TabBase):
"""Profile Page."""
NAME = 'Profile'
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/solar.csv')
def __init__(self, app):
"""Initialize the tab and verify data members.
app -- Dash application instance
"""
super().__init__(app)
self.pdw = PlaidDashWrapper(app)
def createLayout(self):
"""Return the Dash layout components."""
return html.Div(className='section', children=[
html.H1('Manage User Profile'),
html.H2('Edit Linked Accounts'),
# TODO: Add confirmation modal when deleting an account
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in self.df.columns],
data=self.df.to_dict('records'),
row_deletable=True,
),
self.pdw.createLayout(),
])
def registerCallbacks(self):
"""Register callbacks necessary for this tab."""
self._edits()
self.pdw.registerCallbacks()
def _edits(self):
"""Read changes to the data table."""
@self.app.callback(
Output('table', 'figure'),
[Input('table', 'data'), Input('table', 'columns')])
def readTableChanges(rows, columns):
            self.df = pd.DataFrame(rows, columns=[c['name'] for c in columns])
import json
import os
from imblearn.over_sampling import ADASYN, RandomOverSampler
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import pandas as pd
import numpy as np
import random
from sklearn.model_selection import KFold, GridSearchCV
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, roc_curve, classification_report
from sklearn.model_selection import train_test_split
import sklearn
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.utils import resample
from DecisionTree.MyDecisionTree import MyDecisionTree
from LogisticRegression.MyLogisticRegression import MyLogisticRegression
from NaiveBayes.MyNaiveBayes import MyGaussianNB
pd.set_option('display.max_rows', 1000)  # the maximum number of rows/columns shown can be adjusted as needed
pd.set_option('display.max_columns', 1000)
class MyModel():
def __init__(self):
self.train_df = pd.read_csv("VI_train.csv")
self.test_df = pd.read_csv("VI_test.csv")
self.train_df = self.train_df.drop(['Unnamed: 0'], axis=1)
    # Standardize the training, validation and test sets
def standardData(self, X_train, X_valid, X_test):
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
X_valid = sc_X.transform(X_valid)
return X_train, X_valid, X_test
def upsampleFeature(self, df):
def upsample(df, u_feature, n_upsampling):
df_temp = df.copy()
ones = df.copy()
            # Depending on the feature, randomly generate new values around the existing ones
for n in range(n_upsampling):
if u_feature == 'Annual_Premium':
df_temp[u_feature] = ones[u_feature].apply(
                        lambda x: x + random.randint(-1, 1) * x * 0.05)  # change Annual_Premium in the range of 5%
else:
df_temp[u_feature] = ones[u_feature].apply(
lambda x: x + random.randint(-5, 5)) # change Age in the range of 5 years
if n == 0:
df_new = df_temp.copy()
else:
df_new = pd.concat([df_new, df_temp])
return df_new
df_train_up_age = upsample(df.loc[df['Response'] == 1], 'Age', 1)
df_train_up_vintage = upsample(df.loc[df['Response'] == 1], 'Vintage', 1)
df_ext = pd.concat([df, df_train_up_age])
df_ext = pd.concat([df_ext, df_train_up_vintage])
# X_train = df_ext.drop(columns=['Response'])
# y_train = df_ext.Response
print(len(df_ext))
return df_ext
def upsampleData(self, df):
ros = RandomOverSampler(random_state=42, sampling_strategy='minority')
x_train_sampled, y_train_sampled = ros.fit_resample(df.drop('Response', axis=1), df['Response'])
ada = ADASYN(random_state=42)
x_train_sampled, y_train_sampled = ada.fit_resample(df.drop('Response', axis=1), df['Response'])
x_train_sampled['Response'] = y_train_sampled
print(len(x_train_sampled))
return x_train_sampled
def downsample(self, df):
df_no_response = df[df['Response'] == 0]
df_response = df[df['Response'] == 1]
df_no_response_downsampled = resample(df_no_response,
replace=False,
n_samples=int(len(df_response)*2),
random_state=42)
df_downsample = pd.concat([df_no_response_downsampled, df_response])
print(len(df_downsample))
return df_downsample
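    # Rough usage comparison (hypothetical class balance): with far fewer positive than
    # negative rows in train_df, upsampleData() grows the minority class to roughly
    # parity via ADASYN, while downsample() keeps every positive and only about twice
    # as many negatives, e.g.:
    #   balanced_up = model.upsampleData(model.train_df)
    #   balanced_down = model.downsample(model.train_df)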
def featureEngineer(self, df_train, df_test):
        # Get the feature names
df_train_response = df_train.loc[df_train.Response == 1].copy()
categorical_features = ['Gender', 'Driving_License', 'Region_Code', 'Previously_Insured', 'Vehicle_Age',
'Vehicle_Damage', 'Policy_Sales_Channel']
text_features = ['Gender', 'Vehicle_Age', 'Vehicle_Damage']
        # Encode the text features
labelEncoder = preprocessing.LabelEncoder()
for f in text_features:
df_train[f] = labelEncoder.fit_transform(df_train[f])
df_test[f] = labelEncoder.fit_transform(df_test[f])
        # Change the data types
df_train.Region_Code = df_train.Region_Code.astype('int32')
df_train.Policy_Sales_Channel = df_train.Policy_Sales_Channel.astype('int32')
df_test.Region_Code = df_test.Region_Code.astype('int32')
df_test.Policy_Sales_Channel = df_test.Policy_Sales_Channel.astype('int32')
        # Encode age into age bins
bins = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        df_train['bin_age'] = pd.cut(df_train['Age'], bins)  # bin by age range
df_train['age_bin_cat'] = labelEncoder.fit_transform(df_train['bin_age'])
        df_test['bin_age'] = pd.cut(df_test['Age'], bins)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import itertools
import numpy as np
import pytest
from pandas.compat import u
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
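    # Extra illustrative check (not from the original suite): pivoting to wide form
    # and stacking back should recover the original long-format values.
    def test_pivot_stack_roundtrip_sketch(self):
        df = DataFrame({'index': ['A', 'B'], 'columns': ['One', 'One'],
                        'values': [1., 2.]})
        pivoted = df.pivot(index='index', columns='columns', values='values')
        restacked = pivoted.stack().reset_index(name='values')
        assert restacked['values'].tolist() == [1.0, 2.0]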
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with pytest.raises(ValueError, match='duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
        key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in a TypeError
msg = r"'fill_value' \('d'\) is not in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
msg = ("level should contain all level names or all level numbers, not"
" a mixture of the two")
with pytest.raises(ValueError, match=msg):
df2.stack(level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
        assert_series_equal(result, expected)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 26 14:55:32 2018
@author: kazuki.onodera
check all feature
"""
import gc, os
from tqdm import tqdm
import pandas as pd
import numpy as np
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count, Pool
from glob import glob
from collections import defaultdict
import utils, utils_cat
utils.start(__file__)
#==============================================================================
LOOP = 3
SEED = 71
SUBMIT_FILE_PATH = '../output/726-2_check_all_feature.csv.gz'
COMMENT = f'CV auc-mean(7 fold): 0.80453 + 0.00324 all(700+142)'
EXE_SUBMIT = True
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.9,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
'seed': SEED
}
np.random.seed(SEED)
# =============================================================================
# feature
# =============================================================================
df_lb800 = pd.read_csv('../output/725-1_X.csv.gz')
df_lb804 = pd.read_csv('../output/725-2_X.csv.gz')
sub_lb800 = pd.read_csv('../output/725-1.csv.gz')
sub_lb804 = pd.read_csv('../output/725-2.csv.gz')
feature_all = sorted( set(df_lb800.columns.tolist() + df_lb804.columns.tolist()) )
file_tr = ('../feature/train_' + pd.Series(feature_all) + '.f').tolist()
file_te = ('../feature/test_' + pd.Series(feature_all) + '.f').tolist()
# =============================================================================
# load data
# =============================================================================
X_train = pd.concat([
    pd.read_feather(f) for f in tqdm(file_tr)
], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 14:29:57 2020
@author: Shane
"""
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import scipy
import scipy.stats
import operator
from operator import truediv
import glob
import statsmodels.stats.api as sms
#import matplotlib for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import seaborn as sns
import math
from math import sqrt
from scipy.spatial import distance
#import os to handle operating system
import os
#=============================================================================
#Goal: Import appended datasets to generate summary plots.
#==============================================================================
#setup the data directory
datadir = "D:\\Goode_Lab\\Projects\\actin_cables\\data\\cable_trajectory_data\\"
#initalize data frame to append all data
df_t0 = pd.DataFrame()
df_t8 = pd.DataFrame()
all_df = pd.DataFrame()
#read in the summary data files to compare t0 to t8 cells
df_t0 = pd.read_csv(datadir + \
"200826_t0_all_cable_extension_rate_analysis_cutoff.csv")
df_t8 = pd.read_csv(datadir + \
"200826_t8_all_cable_extension_rate_analysis_cutoff.csv")
#combine data into a single dataframe for some of the plotting/stats
frames = [df_t0, df_t8]
all_df = pd.concat(frames)
#=============================================================================
#calculate means and std deviation for each time point
df_t0_t_mean = pd.DataFrame()
df_t0_t_mean = df_t0.groupby(['lifetime']).mean().reset_index()
df_t0_t_std = pd.DataFrame()
df_t0_t_std = df_t0.groupby(['lifetime']).std().reset_index()
df_t8_t_mean = pd.DataFrame()
df_t8_t_mean = df_t8.groupby(['lifetime']).mean().reset_index()
df_t8_t_std = pd.DataFrame()
df_t8_t_std = df_t8.groupby(['lifetime']).std().reset_index()
#calculate means for each timepoint for each replicate
df_t0_expt_mean = df_t0.groupby(['lifetime', 'n'],\
sort=False).mean().reset_index()
df_t8_expt_mean = df_t8.groupby(['lifetime', 'n'],\
sort=False).mean().reset_index()
#=============================================================================
#initialize plotting parameters
cmap = ["#7fb800", "#ffb400"]
ft = 22 #font size for axis
ft2 = 30 #font size for axis
t_tot = np.linspace(0,60,61) #range of times to plot
o = ['t0', 't8'] #order to plot initial rate
st = 'ticks' #set the style of ticks
#=============================================================================
#plot the extension rates using the mean of each replicate
with sns.axes_style(st):
plt.figure(figsize=(5,5))
#plot the mean and 95%CI for the replicates
ax = sns.lineplot(x=df_t0_expt_mean['lifetime'],\
y=df_t0_expt_mean['vel'],
color='#7fb800', ci=95, label='Mean', lw=3)
#plot the mean for each replicates
ax = sns.scatterplot(x=df_t0_expt_mean['lifetime'], \
y=df_t0_expt_mean['vel'],
color = 'grey', label='Experiment', edgecolor='k',\
linewidth=1, alpha=1, s=80)
plt.xlabel('Extension time (sec)', fontsize=ft)
plt.ylabel(u'Extension rate (${\mu}m$/sec)', fontsize=ft)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.1))
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 0.5])
plt.xlim([0, 60])
plt.tight_layout()
# plt.savefig('201217_uninduced_cable_ext_vs_lifetime_exptN.svg')
with sns.axes_style(st):
plt.figure(figsize=(5,5))
#plot the mean and 95%CI for the replicates
ax = sns.lineplot(x=df_t8_expt_mean['lifetime'],\
y=df_t8_expt_mean['vel'],
color='#ffb400', ci=95, label='Mean', lw=3)
#plot the mean for each replicates
ax = sns.scatterplot(x=df_t8_expt_mean['lifetime'], \
y=df_t8_expt_mean['vel'],
color = 'grey', label='Experiment', edgecolor='k',\
linewidth=1, alpha=1, s=80)
plt.xlabel('Extension time (sec)', fontsize=ft)
plt.ylabel(u'Extension rate (${\mu}m$/sec)', fontsize=ft)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.1))
ax.tick_params('both', length=5, which='both')
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 0.5])
plt.xlim([0, 60])
plt.tight_layout()
# plt.savefig('201217_induced_cable_ext_vs_lifetime_exptN.svg')
#=============================================================================
#plot the change in length as a function of time
with sns.axes_style(st):
plt.figure(figsize=(5,5))
sns.set_palette(cmap)
#plot each cable
ax = sns.scatterplot(x=df_t8_expt_mean['lifetime'], \
y=df_t8_expt_mean['neck_dist'], \
color='#ffb400',alpha=1, \
linewidth=0.7, edgecolor='k')
ax = sns.scatterplot(x=df_t0_expt_mean['lifetime'],\
y=df_t0_expt_mean['neck_dist'], \
color='#7fb800',alpha=1,\
linewidth=0.7, edgecolor='k')
#plot the mean and 95%CI for the replicates
ax = sns.lineplot(x=df_t0_expt_mean['lifetime'], \
y=df_t0_expt_mean['neck_dist'], \
ci=95, label='cdc28-13, uninduced',\
color='#7fb800', lw=3)
ax = sns.lineplot(x=df_t8_expt_mean['lifetime'], \
y=df_t8_expt_mean['neck_dist'],\
color='#ffb400', ci=95, label = 'cdc28-13, induced',\
lw=3)
plt.xlabel('Extension time (sec)', fontsize=ft)
plt.ylabel(u'Cable length (${\mu}m$)', fontsize=ft)
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.tick_params('both', length=5, which='both')
ax.get_legend().remove()
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 15])
plt.xlim([0, 60])
plt.tight_layout()
# plt.savefig('201217_cdct0_cdct8_len_vs_lifetime.svg')
#=============================================================================
#plot the change in rate as a function of length and the ratio of lengths
#use the time averaged lengths and rates for this or the plots don't plot well
#due to the different values for each length
#calculate the errors (95%CI) for each strain
t0_lower_ci = df_t0_t_mean - (1.96 * (df_t0_t_std / np.sqrt(64)))
t0_upper_ci = df_t0_t_mean + (1.96 * (df_t0_t_std / np.sqrt(64)))
t8_lower_ci = df_t8_t_mean - (1.96 * (df_t8_t_std / np.sqrt(57)))
t8_upper_ci = df_t8_t_mean + (1.96 * (df_t8_t_std / np.sqrt(57)))
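#the 1.96 factor gives a normal-approximation 95% interval: half-width = 1.96*std/sqrt(n),
#e.g. std = 0.05 with n = 64 gives 1.96 * 0.05 / 8 -> about 0.012 (illustrative numbers only)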
#plot the change in extension rate as a function of the relative length
with sns.axes_style(st):
plt.figure(figsize=(5,5))
sns.set_palette(cmap)
ax = sns.lineplot(x=df_t0_t_mean['neck_dist']/4.9, y=df_t0_t_mean['vel'],\
ci=95, label='cdc28-13, uninduced', color='#7fb800', lw=3)
ax = sns.lineplot(x=df_t8_t_mean['neck_dist']/8.7, y=df_t8_t_mean['vel'],\
color='#ffb400', ci=95, label = 'cdc28-13, induced',\
lw=3)
plt.fill_between(df_t0_t_mean['neck_dist']/4.9, t0_lower_ci['vel'],\
t0_upper_ci['vel'],\
color='#7fb800', alpha=0.3)
plt.fill_between(df_t8_t_mean['neck_dist']/8.7, t8_lower_ci['vel'],\
t8_upper_ci['vel'],\
color='#ffb400', alpha=0.3)
plt.xlabel('Cable length / Mother cell length', fontsize=ft-6)
plt.ylabel(u'Extension rate (${\mu}m$/sec)', fontsize=ft)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.1))
ax.tick_params('both', length=5, which='both')
ax.get_legend().remove()
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 0.4])
plt.xlim([0, 1.1])
plt.tight_layout()
# plt.savefig('201217_cdct0_cdct8_extrate_v_len_norm.svg')
#plot the change in extension rate as a function of the length
with sns.axes_style(st):
plt.figure(figsize=(5,5))
sns.set_palette(cmap)
ax = sns.lineplot(x=df_t0_t_mean['neck_dist'], y=df_t0_t_mean['vel'], \
ci=95, label='cdc28-13, uninduced', color='#7fb800', lw=3)
ax = sns.lineplot(x=df_t8_t_mean['neck_dist'], y=df_t8_t_mean['vel'],\
color='#ffb400', ci=95, label = 'cdc28-13, induced',\
lw=3)
plt.fill_between(df_t0_t_mean['neck_dist'], t0_lower_ci['vel'],\
t0_upper_ci['vel'],\
color='#7fb800', alpha=0.3)
plt.fill_between(df_t8_t_mean['neck_dist'], t8_lower_ci['vel'],\
t8_upper_ci['vel'],\
color='#ffb400', alpha=0.3)
plt.xlabel(u'Cable length (${\mu}m$)', fontsize=ft)
plt.ylabel(u'Extension rate (${\mu}m$/sec)', fontsize=ft)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.1))
ax.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax.tick_params('both', length=5, which='both')
ax.get_legend().remove()
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 0.4])
plt.xlim([0, 10])
plt.tight_layout()
# plt.savefig('201217_cdct0_cdct8_extrate_v_len.svg')
#plot the change in extension rate as a function of the theoretical time to
#reach the end of the cell compartment
with sns.axes_style(st):
plt.figure(figsize=(5,5))
sns.set_palette(cmap)
ax = sns.scatterplot(x=df_t8_expt_mean['lifetime']/(8.7/0.32),\
y=df_t8_expt_mean['neck_dist']/8.7, \
color='#ffb400',alpha=1,linewidth=0.7, edgecolor='k')
ax = sns.scatterplot(x=df_t0_expt_mean['lifetime']/(4.9/0.35),\
y=df_t0_expt_mean['neck_dist']/4.9, \
color='#7fb800',alpha=1,linewidth=0.7, edgecolor='k')
ax = sns.lineplot(x=df_t0_expt_mean['lifetime']/(4.9/0.35),\
y=df_t0_expt_mean['neck_dist']/4.9, \
ci=95, label='cdc28-13, uninduced', color='#7fb800', lw=3)
ax = sns.lineplot(x=df_t8_expt_mean['lifetime']/(8.7/0.32),\
y=df_t8_expt_mean['neck_dist']/8.7,\
color='#ffb400', ci=95, label = 'cdc28-13, induced',\
lw=3)
plt.xlabel('Time / Time$_{max}$', fontsize=ft)
plt.ylabel('Cable length / Mother cell length', fontsize=ft-6)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.tick_params('both', length=5, which='both')
ax.get_legend().remove()
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 1.6])
plt.xlim([0, 2.4])
plt.tight_layout()
# plt.savefig('201217_cdct0_cdct8_rellen_vs_tmax_uniqueR0.svg')
#plot the change in extension rate as a function of time
with sns.axes_style(st):
plt.figure(figsize=(5,5))
sns.set_palette(cmap)
ax = sns.scatterplot(x=df_t8_expt_mean['lifetime'],\
y=df_t8_expt_mean['neck_dist']/8.7, \
color='#ffb400',alpha=1,linewidth=0.7, edgecolor='k')
ax = sns.scatterplot(x=df_t0_expt_mean['lifetime'],\
y=df_t0_expt_mean['neck_dist']/4.9, \
color='#7fb800',alpha=1,linewidth=0.7, edgecolor='k')
ax = sns.lineplot(x=df_t0_expt_mean['lifetime'],\
y=df_t0_expt_mean['neck_dist']/4.9, \
ci=95, label='cdc28-13, uninduced', color='#7fb800', lw=3)
ax = sns.lineplot(x=df_t8_expt_mean['lifetime'],\
y=df_t8_expt_mean['neck_dist']/8.7,\
color='#ffb400', ci=95, label = 'cdc28-13, induced',\
lw=3)
plt.xlabel('Extension time (sec)', fontsize=ft)
plt.ylabel('Cable length / Mother cell length', fontsize=ft-6)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.tick_params('both', length=5, which='both')
ax.get_legend().remove()
plt.rc('xtick', labelsize=ft)
plt.rc('ytick', labelsize=ft)
plt.ylim([0, 1.6])
# plt.xlim([0, ])
plt.tight_layout()
# plt.savefig('201217_cdct0_cdct8_rellen_vs_time.svg')
#=============================================================================
#now fit the initial extension rates with linear regression
#first fit the FP data
t_fit = 5 #final frame to fit, ~10s
from scipy.stats import linregress
#fit the uninduced cells
slope_t0, intercept_t0, r_value_t0, p_value_t0, std_err_t0 = \
scipy.stats.linregress(df_t0_t_mean['lifetime'][:t_fit], \
df_t0_t_mean['vel'][:t_fit])
print("r-squared_t0:", r_value_t0**2, "slope_t0:", slope_t0)
#fit the induced cells
slope_t8, intercept_t8, r_value_t8, p_value_t8, std_err_t8 = \
scipy.stats.linregress(df_t8_t_mean['lifetime'][:t_fit-1], \
df_t8_t_mean['vel'][:t_fit-1])
print("r-squared_t8:", r_value_t8**2, "slope_t8:", slope_t8)
#=============================================================================
#plot the fit from linear regression over the initial extension rates
with sns.axes_style(st):
plt.figure(figsize=(6,7))
plt.plot(t_tot,(intercept_t0 + slope_t0*t_tot),\
'k--', lw=3,\
label=r"Slope = {0:.3f}+/-{2:.3f}, R$^2$ = {1:.2f}".\
format(slope_t0, r_value_t0**2,1.96*std_err_t0 ))
sns.scatterplot(x=df_t0_t_mean['lifetime'], y=df_t0_t_mean['vel'], \
color='#7fb800', s = 300, ci=95, linewidth=0.5,\
label=None, edgecolor='k')
ax = sns.lineplot(x=df_t0['lifetime'], y=df_t0['vel'], \
color='#7fb800', ci=95, label=None,\
lw=0)
plt.xlabel('Extension time (sec)', fontsize=ft2)
plt.ylabel(u'Extension rate (${\mu}m$/sec)', fontsize=ft2)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.1))
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.tick_params('both', length=5, which='both')
plt.rc('xtick', labelsize=ft2)
plt.rc('ytick', labelsize=ft2)
plt.ylim([0.1, 0.4])
plt.xlim([0, 15])
plt.tight_layout()
plt.legend(loc='upper right', prop={'size': 13})
# plt.savefig('201217_t0_cable_ext_linearfit.svg')
with sns.axes_style(st):
plt.figure(figsize=(6,7))
plt.plot(t_tot,(intercept_t8 + slope_t8*t_tot),\
'k--', lw=3,\
label=r"Slope = {0:.3f}+/-{2:.3f}, R$^2$ = {1:.2f}".\
format(slope_t8,r_value_t8**2,1.96*std_err_t8 ))
sns.scatterplot(x=df_t8_t_mean['lifetime'], y=df_t8_t_mean['vel'], \
color='#ffb400', s = 300, ci=95, linewidth=0.5,\
label=None, edgecolor='k')
ax = sns.lineplot(x=df_t8['lifetime'], y=df_t8['vel'], \
color='#ffb400', ci=95, label=None,\
lw=0)
plt.xlabel('Extension time (sec)', fontsize=ft2)
plt.ylabel(u'Extension rate (${\mu}m$/sec)', fontsize=ft2)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.1))
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.tick_params('both', length=5, which='both')
plt.rc('xtick', labelsize=ft2)
plt.rc('ytick', labelsize=ft2)
plt.ylim([0.1, 0.4])
plt.xlim([0, 15])
plt.tight_layout()
plt.legend(loc='upper right', prop={'size': 13})
# plt.savefig('201217_t8_cable_ext_linearfit.svg')
#=============================================================================
#setup of the initial rates for each strain
df2 = pd.DataFrame()
#!/usr/bin/env python
"""Download files from Brazilian Flora 2020 Web Service."""
import argparse
import json
import os
import random
import socket
import sys
import textwrap
import time
import urllib.request
from urllib.error import HTTPError
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from traiter.util import log
from brazil.pylib.util import species_path
from brazil.pylib.consts import BRAZIL_DIR, BRAZIL_FAMILIES
# Website to crawl
SITE = 'http://servicos.jbrj.gov.br/flora/'
# Don't hit the site too hard
SLEEP_MID = 15
SLEEP_RADIUS = 5
SLEEP_RANGE = (SLEEP_MID - SLEEP_RADIUS, SLEEP_MID + SLEEP_RADIUS)
WAIT = 20 # How many seconds to wait for the page action to happen
# Set a timeout for requests
TIMEOUT = 60
socket.setdefaulttimeout(TIMEOUT)
def main(args):
"""Download the data."""
if args.all_families:
all_families()
sys.exit()
if not args.family_action:
log('Error: You must choose a --family-action.')
sys.exit()
if args.family_action == 'list':
species(args)
elif args.family_action == 'pages':
pages(args)
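# Example invocations (sketch only; the exact option names come from the argparse
# setup defined later in this script and are assumed here):
#   python download.py --all-families
#   python download.py --family Fabaceae --family-action list
#   python download.py --family Fabaceae --family-action pages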
def all_families():
"""Save a list of families."""
url = SITE + 'families'
urllib.request.urlretrieve(url, BRAZIL_FAMILIES)
def species(args):
"""Download all species for a family."""
url = SITE + f'species/{args.family}'
urllib.request.urlretrieve(url, species_path(args.family))
def pages(args):
"""Download all treatment pages for a family."""
driver = webdriver.Firefox(log_path=args.log_file)
driver.implicitly_wait(2)
path = species_path(args.family)
if not path.exists():
sys.exit(f'The file {path} does not exist.')
with open(path) as json_file:
data = json.load(json_file)
    df = pd.DataFrame(data['result'])
# -*- coding: utf-8 -*-
from .._utils import color_digits, color_background
from ..data import Data, DataSamples
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc
#from scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
import gc
#import weakref
import copy
import itertools
import calendar
#from ..cross import DecisionTree, Crosses
import networkx as nx
from operator import itemgetter
import matplotlib.ticker as mtick
try:
import fastcluster
except Exception:
print('For fullness analysis using hierarchical clustering please install fastcluster package.')
from scipy.cluster.hierarchy import fcluster
try:
import hdbscan
except Exception:
print('For fullness analysis using HDBSCAN clustering please install hdbscan package.')
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
from os import system
from IPython.display import Image as Display_Image
#from joblib import Parallel, delayed
# Created by <NAME> and <NAME>
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class Processor(metaclass = ABCMeta):
"""
Base class for processing objects of Data class
"""
@abstractmethod
def __init__(self):
'''
self.stats is a DataFrame with statistics about self.work()
'''
self.stats = pd.DataFrame()
@abstractmethod
def work(self, data, parameters):
pass
def param_dict_to_stats(self, data, d):
'''
TECH
Transforms a dict of parameters to self.stats
Parameters
-----------
data: Data object being processed
d: dictionary {action : list_of_features} where action is a string with action description and list_of_features is a list of features' names to apply the action to
'''
col1 = []
col2 = []
for (how, features) in d.items():
col1 = col1 + [how + ' (' + str(round(data.dataframe[features[i]].mean(), 3)) + ')' if how == 'mean' else how for i in range(len(features))]
col2 = col2 + features
self.stats = pd.DataFrame({'action' : col1, 'features': col2})
#---------------------------------------------------------------
class MissingProcessor(Processor):
'''
Class for missing values processing
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, parameters, quantiles=100, precision=4):
'''
Deals with missing values
Parameters:
-----------
data: an object of Data type that should be processed
inplace: whether to change the data or to create a new Data object
parameters: {how_to_process : features_to_process}
how_to_process takes:
'delete' - to delete samples where the value of any feature from features_to_process is missing
'mean' - for each feature from features_to_process to fill missings with the mean value
'distribution' - for each feature from features_to_process to fill missings according to non-missing distribution
a value - for each feature from features_to_process to fill missings with this value
features_to_process takes list of features from data
quantiles: number of quantiles for 'distribution' type of missing process - all values are divided into quantiles,
                then missing values are filled with average values of quantiles. If the number of unique values is less than the number of quantiles
or field type is not int, float, etc, then no quantiles are calculated - missings are filled with existing values according
to their frequency
precision: precision for quantile edges and average quantile values
Returns:
----------
A copy of data with missings processed for features mentioned in parameters
'''
for how in parameters:
if isinstance(parameters[how], str):
parameters[how] = [parameters[how]]
result = data.dataframe.copy()
for how in parameters:
if how == 'delete':
for feature in parameters[how]:
result = result[result[feature].isnull() == False]
if data.features != None and feature in data.features:
data.features.remove(feature)
elif how == 'mean':
for feature in parameters[how]:
result[feature].fillna(result[feature].mean(), inplace = True)
elif how == 'distribution':
for feature in parameters[how]:
if data.dataframe[feature].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[feature].unique().shape[0]<quantiles:
summarized=data.dataframe[[feature]].dropna().groupby(feature).size()
summarized=summarized.reset_index().rename({feature:'mean', 0:'size'}, axis=1)
else:
summarized=data.dataframe[[feature]].rename({feature:'value'}, axis=1).join(pd.qcut(data.dataframe[feature].dropna(), q=quantiles, precision=4, duplicates='drop')).groupby(feature).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index(drop=True)
#summarized=summarized.reset_index()
summarized['p']=summarized['size']/summarized['size'].sum()
result[feature]=result[feature].apply(lambda x: np.random.choice(summarized['mean'].round(precision), p=summarized['p']) if pd.isnull(x) else x)
else:
result[parameters[how]] = result[parameters[how]].fillna(how)
# statistics added on Dec-04-2018
self.param_dict_to_stats(data, parameters)
return Data(result, data.target, data.features, data.weights, data.name)
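# Example (hypothetical usage sketch) for MissingProcessor: `df` and the feature
# names 'age'/'income' below are illustrative assumptions, not shipped examples.
#
#   data = Data(df, target='target', features=['age', 'income'])
#   mp = MissingProcessor()
#   processed = mp.work(data, parameters={'mean': ['age'],              # fill 'age' with its mean
#                                         'distribution': ['income']})  # fill 'income' from its empirical distribution
#   print(mp.stats)   # table of actions applied per feature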
#---------------------------------------------------------------
class StabilityAnalyzer(Processor):
'''
For stability analysis
'''
def __init__(self):
self.stats = pd.DataFrame({'sample_name' : [], 'parameter' : [], 'meaning': []})
def work(self, data, time_column, sample_name = None, psi = None, event_rate=None, normalized=True, date_format = "%d.%m.%Y", time_func = (lambda x: 100*x.year + x.month),
yellow_zone = 0.1, red_zone = 0.25, figsize = None, out = True, out_images = 'StabilityAnalyzer/', sep=';', base_period_index=0):
'''
Calculates the dynamics of feature (or feature value group) changes over time, so it should be used only for discrete or WOE-transformed
features. There are 2 types of analysis:
PSI. Represents a heatmap (Stability Table) of features stability that contains 3 main zones: green (the feature is
stable), yellow (the feature is not very stable) and red (the feature is unstable). StabilityIndex (PSI) is calculated for each
time period relatively to the first period.
Stability index algorithm:
For each feature value and time period the number of samples is calculated: e.g., N[i, t] is the number of samples for value i and time period t.
Let p[i, t] = N[i, t]/sum_i(N[i, t]) be the share of value i in period t (period 0 is the base period, see base_period_index). Then
StabilityIndex[t] = sum_i((p[i, t] - p[i, 0]) * ln(p[i, t]/p[i, 0]))
ER (event rate). Calculates average event rate and number of observations for each feature's value over time.
After calculation displays the Stability Table (a heatmap with stability indexes for each feature value and time period)
and Event rate graphs
Parameters:
-----------
data: data to analyze (type Data)
time_column: name of a column with time values to calculate time periods
sample_name: name of sample for report
psi: list of features for PSI analysis (if None then all features from the input Data object will be used)
event_rate: list of features for event rate and distribution in time analysis (if None then all features from the input Data object will be used)
date_format: format of time values in time_column. Codes for format:
%a Weekday as locale’s abbreviated name. Sun, Mon, …, Sat (en_US)
%A Weekday as locale’s full name. Sunday, Monday, …, Saturday (en_US)
%w Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 0, 1, …, 6
%d Day of the month as a zero-padded decimal number. 01, 02, …, 31
%b Month as locale’s abbreviated name. Jan, Feb, …, Dec (en_US)
%B Month as locale’s full name. January, February, …, December (en_US)
%m Month as a zero-padded decimal number. 01, 02, …, 12
%y Year without century as a zero-padded decimal number. 00, 01, …, 99
%Y Year with century as a decimal number. 1970, 1988, 2001, 2013
%H Hour (24-hour clock) as a zero-padded decimal number. 00, 01, …, 23
%I Hour (12-hour clock) as a zero-padded decimal number. 01, 02, …, 12
%p Locale’s equivalent of either AM or PM. AM, PM (en_US)
%M Minute as a zero-padded decimal number. 00, 01, …, 59
%S Second as a zero-padded decimal number. 00, 01, …, 59
%f Microsecond as a decimal number, zero-padded on the left. 000000, 000001, …, 999999
%z UTC offset in the form +HHMM or -HHMM (empty string if the
object is naive). (empty), +0000, -0400, +1030
%Z Time zone name (empty string if the object is naive). (empty), UTC, EST, CST
%j Day of the year as a zero-padded decimal number. 001, 002, …, 366
%U Week number of the year (Sunday as the first day of the week)
as a zero padded decimal number. All days in a new year preceding
the first Sunday are considered to be in week 0. 00, 01, …, 53 (6)
%W Week number of the year (Monday as the first day of the week) as
a decimal number. All days in a new year preceding the first
Monday are considered to be in week 0. 00, 01, …, 53 (6)
%c Locale’s appropriate date and time representation. Tue Aug 16 21:30:00 1988 (en_US)
%x Locale’s appropriate date representation. 08/16/88 (None); 08/16/1988 (en_US)
%X Locale’s appropriate time representation. 21:30:00 (en_US)
time_func: function for time_column parsing (changes date to some value, representing time period) or
a period type for dt.to_period() function. Codes for available periods:
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
BQ business quarter end frequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
yellow_zone: the lower border for the yellow stability zone ('not very stable') in percents of derivation
red_zone: the lower border for the red stability zone ('unstable') in percents of derivation
figsize: matplotlib figsize of the Stability Table
out: a boolean for image output or a path for xlsx output file to export the Stability Tables
out_images: a path for image output (default - StabilityAnalyzer/)
sep: the separator to be used in case of csv export
base_period_index: index of period (starting from 0) for other periods to compare with (0 for the first, -1 for the last)
'''
print('Warning: only for discrete features!!!')
if sample_name is None:
if pd.isnull(data.name):
sample_name = 'sample'
else:
sample_name = data.name
out_images = out_images + sample_name + '/'
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out'], 'meaning' : [out]}))
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out_images'], 'meaning' : [out_images]}))
psi = data.features.copy() if psi is None else [x for x in psi if x in data.features]
event_rate = data.features.copy() if event_rate is None else [x for x in event_rate if x in data.features]
all_features=list(set(psi+event_rate))
if figsize is None:
figsize=(12, max(1,round(len(psi)/2,0)))
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
writer = pd.ExcelWriter(out, engine='openpyxl')
tmp_dataset = data.dataframe[all_features + [time_column, data.target] + ([] if data.weights is None else [data.weights])].copy()
tmp_dataset[time_column] = pd.to_datetime(tmp_dataset[time_column], format=date_format, errors='coerce')
if callable(time_func):
tmp_dataset['tt'] = tmp_dataset[time_column].map(time_func)
elif isinstance(time_func, str):
try:
tmp_dataset['tt'] = tmp_dataset[time_column].dt.to_period(time_func).astype(str)
except Exception:
print('No function or correct period code was provided. Return None.')
return None
c = 0
for feature in sorted(all_features):
print (feature)
if data.weights is not None:
feature_stats=tmp_dataset[[feature, 'tt', data.target, data.weights]]
feature_stats['---weight---']=feature_stats[data.weights]
else:
feature_stats=tmp_dataset[[feature, 'tt', data.target]]
feature_stats['---weight---']=1
feature_stats[data.target]=feature_stats[data.target]*feature_stats['---weight---']
feature_stats=feature_stats[[feature, 'tt', data.target, '---weight---']].groupby([feature, 'tt'], as_index=False).\
agg({'---weight---':'size', data.target:'mean'}).rename({feature:'value', '---weight---':'size', data.target:'mean'}, axis=1)
feature_stats['feature']=feature
if c == 0:
all_stats = feature_stats
c = c+1
else:
all_stats = all_stats.append(feature_stats, ignore_index=True)
all_stats['size']=all_stats['size'].astype(float)
all_stats['mean']=all_stats['mean'].astype(float)
if len(psi)>0:
stability1=all_stats[all_stats.feature.isin(psi)][['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
stability1.columns.name=None
#display(stability1)
dates = stability1.drop(['feature', 'value'], 1).columns.copy()
stability2 = stability1[['feature', 'value']].copy()
for date in dates:
stability2[date] = list(stability1[date]/list(stability1.drop(['value'], 1).groupby(by = 'feature').sum()[date][:1])[0])
#display(stability2)
start_date = dates[base_period_index]
stability3 = stability2[['feature', 'value']]
for date in dates:
stability3[date] = round(((stability2[date]-stability2[start_date])*np.log(stability2[date]/stability2[start_date])).fillna(0), 2).replace([np.inf, -np.inf], 0)  # infinities appear when a value occurs in only one of the compared periods
#display(stability3)
stability4 = stability3.drop(['value'], 1).groupby(by = 'feature').sum()
#display(stability4)
fig, ax = plt.subplots(figsize = figsize)
ax.set_facecolor("red")
sns.heatmap(stability4, ax=ax, yticklabels=stability4.index, annot = True, cmap = 'RdYlGn_r', center = yellow_zone, vmax = red_zone, linewidths = .05, xticklabels = True)
if out==True or isinstance(out, str):
plt.savefig(out_images+"stability.png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
stability4.style.apply(color_background,
mn=0, mx=red_zone, cntr=yellow_zone).to_excel(writer, engine='openpyxl', sheet_name='PSI')
worksheet = writer.sheets['PSI']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['B2']
else:
print('Unknown or unacceptable format for exporting several tables. Use .xlsx. Skipping export.')
if len(event_rate)>0:
for_event_rate=all_stats[all_stats['feature'].isin(event_rate)]
date_base=pd.DataFrame(all_stats['tt'].unique(), columns=['tt']).sort_values('tt')
for feature in sorted(for_event_rate['feature'].unique()):
cur_feature_data=for_event_rate[for_event_rate['feature']==feature].copy()
#display(cur_feature_data)
if normalized:
for tt in sorted(cur_feature_data['tt'].unique(), reverse=True):
cur_feature_data.loc[cur_feature_data['tt']==tt, 'percent']=cur_feature_data[cur_feature_data['tt']==tt]['size']/cur_feature_data[cur_feature_data['tt']==tt]['size'].sum()
#display(cur_feature_data)
fig, ax = plt.subplots(1,1, figsize=(15, 5))
ax2 = ax.twinx()
ax.grid(False)
ax2.grid(False)
sorted_values=sorted(cur_feature_data['value'].unique(), reverse=True)
for value in sorted_values:
to_visualize='percent' if normalized else 'size'
value_filter = (cur_feature_data['value']==value)
er=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')['mean']
height=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')[to_visualize].fillna(0)
bottom=date_base.merge(cur_feature_data[['tt',to_visualize]][cur_feature_data['value']>value].groupby('tt', as_index=False).sum(), on='tt', how='left')[to_visualize].fillna(0)
ax.bar(range(date_base.shape[0]), height, bottom=bottom if value!=sorted_values[0] else None, edgecolor='white', alpha=0.3)
ax2.plot(range(date_base.shape[0]), er, label=str(round(value,3)), linewidth=2)
plt.xticks(range(date_base.shape[0]), date_base['tt'])
fig.autofmt_xdate()
ax2.set_ylabel('Event Rate')
ax2.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
ax2.annotate('Obs:', xy=(0, 1), xycoords=('axes fraction', 'axes fraction'), xytext=(-25, 5), textcoords='offset pixels', color='blue', size=11)
for i in range(date_base.shape[0]):
ax2.annotate(str(int(cur_feature_data[cur_feature_data['tt']==date_base['tt'][i]]['size'].sum())),
xy=(i, 1),
xycoords=('data', 'axes fraction'),
xytext=(0, 5),
textcoords='offset pixels',
#rotation=60,
ha='center',
#va='bottom',
color='blue',
size=11)
ax.set_ylabel('Total obs')
plt.xlabel(time_column)
plt.suptitle(feature + ' event rate in time' if callable(time_func) else feature + ' event rate in time, period = '+time_func)
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles[::-1], labels[::-1], loc=0, fancybox=True, framealpha=0.3)
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
event_rate_df=all_stats[['feature', 'value', 'tt', 'mean']].pivot_table(values='mean', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
event_rate_df.columns.name=None
event_rate_df.style.apply(color_background,
mn=0, mx=all_stats['mean'].mean()+2*all_stats['mean'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn_r, subset=pd.IndexSlice[:, [x for x in event_rate_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Event Rate', index=False)
worksheet = writer.sheets['Event Rate']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
if x[0].column!='B':
for cell in worksheet[x[0].column]:
if cell.row!=1:
cell.number_format = '0.000%'
worksheet.freeze_panes = worksheet['C2']
size_df=all_stats[['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
size_df.columns.name=None
size_df.style.apply(color_background,
mn=0, mx=all_stats['size'].mean()+2*all_stats['size'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn, subset=pd.IndexSlice[:, [x for x in size_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Observations', index=False)
worksheet = writer.sheets['Observations']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['C2']
else:
print('Unknown or unacceptable format for exporting several tables. Use .xlsx. Skipping export.')
if isinstance(out, str):
writer.close()
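# Example (hypothetical usage sketch) for StabilityAnalyzer: `woe_data` and the
# column names are assumptions for illustration only; the object is expected to be
# a Data instance with discrete (e.g. WOE-transformed) features.
#
#   sa = StabilityAnalyzer()
#   sa.work(woe_data, time_column='report_date', date_format='%d.%m.%Y',
#           time_func='M',                     # group observations by calendar month
#           yellow_zone=0.1, red_zone=0.25,
#           out='stability_report.xlsx')       # export PSI / Event Rate / Observations sheets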
#---------------------------------------------------------------
class DataVisualizer(Processor):
'''
Supports different types of data visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, distribution = True, factorplot = True, factorplot_separate = False, pairplot = None,
out=False, out_images='DataVisualizer/', plot_cells=20, categorical=None):
'''
Produces distribution plot, factorplot, pairplot
Parameters:
-----------
data: data to visualize
distribution: parameter for a distribution plot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use distribution plot
factorplot: parameter for a factorplot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use factorplot
factorplot_separate: if True then separate plots for each target value
pairplot: list of features to make a pairplot for
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - DataVisualizer/)
plot_cells: how many rows of cells each plot occupies in the output excel file
categorical: a list of features to be treated as categorical (countplots will be produced instead of distplots)
'''
if pairplot is None:
pairplot=[]
if categorical is None:
categorical=[]
dataframe_t = data.dataframe[data.features + [data.target]].copy()
data = Data(dataframe_t, features = data.features, target = data.target)
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Data Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_plot_number=0
if distribution:
print ('Distributions of features: ')
if type(distribution) == type([1, 1]):
features = distribution
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
for feature in features:
current_plot_number=current_plot_number+1
if data.dataframe[feature].dtype==object or feature in categorical:
f, axes = plt.subplots()
sns.countplot(data.dataframe[feature].dropna())
f.autofmt_xdate()
else:
sns.distplot(data.dataframe[feature].dropna())
if data.dataframe[feature].isnull().any():
plt.title(feature+' (miss = ' + str(round(data.dataframe[feature].isnull().value_counts()[True]/data.dataframe.shape[0],3))+')')
else:
plt.title(feature+' (miss = 0)')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_d.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_d.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Distribution plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_d.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if factorplot:
print ('Factorplot: ')
if type(factorplot) == type([1, 1]):
features = factorplot
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
if factorplot_separate:
for feature in features:
current_plot_number=current_plot_number+1
# edited 21-Jun-2018 by <NAME>
f, axes = plt.subplots(data.dataframe[data.target].drop_duplicates().shape[0], 1, figsize=(4, 4), sharex=True)
f.autofmt_xdate()
#for target_v in data.dataframe[data.target].drop_duplicates():
targets = list(data.dataframe[data.target].drop_duplicates())
for target_i in range(len(targets)):
if data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().any():
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = ' + str(round(data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().value_counts()[True]/data.dataframe[data.dataframe[data.target]==targets[target_i]].shape[0],3))
else:
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = 0'
if data.dataframe[feature].dtype==object or feature in categorical:
ax=sns.countplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i], color = 'm')
ax.set(xlabel=x_label)
else:
sns.distplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i],
axlabel=x_label, color = 'm')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
else:
for feature in features:
current_plot_number=current_plot_number+1
sns.factorplot(x=feature, hue = data.target, data = data.dataframe, kind='count', palette = 'Set1')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if pairplot != []:
current_plot_number=current_plot_number+1
print ('Pairplot')
sns.pairplot(data.dataframe[pairplot].dropna())
if out==True or isinstance(out, str):
plt.savefig(out_images+"pairplot.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Pair plot for '+str(pairplot)+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+"pairplot.png")
plt.show()
if isinstance(out, str):
workbook.close()
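# Example (hypothetical usage sketch) for DataVisualizer: `data` and the feature
# names are illustrative assumptions.
#
#   dv = DataVisualizer()
#   dv.work(data,
#           distribution=True,                          # distplot/countplot per feature
#           factorplot=True, factorplot_separate=True,  # per-target-value distributions
#           pairplot=['age', 'income'],
#           categorical=['gender'],
#           out='visualization.xlsx')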
#---------------------------------------------------------------
class TargetTrendVisualizer(Processor):
'''
Supports target trend visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, features=None, quantiles=100, magnify_trend=False, magnify_std_number=2, hide_every_even_tick_from=50,
min_size=10, out=False, out_images='TargetTrendVisualizer/', plot_cells=20):
'''
Calculates specified quantiles/takes categories, calculates target rates and sizes, then draws target trends
Parameters:
-----------
data: an object of Data type
features: the list of features to visualize, can be omitted
quantiles: number of quantiles to cut feature values on
magnify_trend: if True, then axis scale for target rate will be corrected to exclude outliers
magnify_std_number: how many standard deviations should be included in magnified scale
hide_every_even_tick_from: if there are too many quantiles then every second tick on the x axis will be hidden
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - TargetTrendVisualizer/)
plot_cells: how many rows of cells each plot occupies in the output excel file
'''
if features is None:
cycle_features=data.features.copy()
else:
cycle_features=features.copy()
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Target Trend Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_feature_number=0
for f in cycle_features:
if f not in data.dataframe:
print('Feature', f, 'not in input dataframe. Skipping..')
else:
print('Processing', f,'..')
current_feature_number=current_feature_number+1
if data.dataframe[f].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[f].unique().shape[0]<quantiles:
summarized=data.dataframe[[f, data.target]].groupby([f]).agg(['mean', 'size'])
else:
if data.dataframe[f].dropna().shape[0]<min_size*quantiles:
current_quantiles=int(data.dataframe[f].dropna().shape[0]/min_size)
if current_quantiles==0:
print('The number of non-missing observations is less than', min_size,'. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'The number of non-missing observations is less than '+str(min_size)+'. No trend to visualize.')
continue
else:
print('Too few non-missing observations for', quantiles, 'quantiles. Calculating', current_quantiles, 'quantiles..')
else:
current_quantiles=quantiles
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
small_quantiles=summarized[data.target][summarized[data.target]['size']<min_size]['size']
#display(small_quantiles)
if small_quantiles.shape[0]>0:
current_quantiles=int(small_quantiles.sum()/min_size)+summarized[data.target][summarized[data.target]['size']>=min_size].shape[0]
print('There are quantiles with size less than', min_size,'. Attempting', current_quantiles, 'quantiles..')
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index()
if pd.isnull(data.dataframe[f]).any():
with_na=data.dataframe[[f,data.target]][pd.isnull(data.dataframe[f])]
summarized.loc[-1]=[np.nan, with_na[data.target].mean(), with_na.shape[0]]
summarized=summarized.sort_index().reset_index(drop=True)
if summarized.shape[0]==1:
print('Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
continue
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
ax.set_ylabel('Observations')
# blue is for the distribution
if summarized.shape[0]>hide_every_even_tick_from:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=60, ha="right")
xticks = ax.xaxis.get_major_ticks()
for i in range(len(xticks)):
if i%2==0:
xticks[i].label1.set_visible(False)
else:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=45, ha="right")
ax.bar(range(summarized.shape[0]), summarized['size'], zorder=0, alpha=0.3)
ax.grid(False)
ax.grid(axis='y', zorder=1, alpha=0.6)
ax2 = ax.twinx()
ax2.set_ylabel('Target Rate')
ax2.grid(False)
#display(summarized)
if magnify_trend:
ax2.set_ylim([0, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))])
for i in range(len(summarized['mean'])):
if summarized['mean'][i]>np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size'])):
ax2.annotate(str(round(summarized['mean'][i],4)),
xy=(i, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
xytext=(i, np.average(summarized['mean'], weights=summarized['size'])+(magnify_std_number+0.05)*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
rotation=60,
ha='left',
va='bottom',
color='red',
size=8.5
)
# red is for the target rate values
ax2.plot(range(summarized.shape[0]), summarized['mean'], 'ro-', linewidth=2.0, zorder=4)
if out==True or isinstance(out, str):
plt.savefig(out_images+f+".png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+f+".png").size[1]
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.insert_image((current_feature_number-1)*(plot_cells+1)+1, 0, out_images+f+".png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
if isinstance(out, str):
workbook.close()
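# Example (hypothetical usage sketch) for TargetTrendVisualizer: `data` and the
# feature names are illustrative assumptions.
#
#   ttv = TargetTrendVisualizer()
#   ttv.work(data, features=['age', 'income'],
#            quantiles=20,           # cut numeric features into up to 20 quantiles
#            magnify_trend=True,     # clip the target-rate axis to weighted mean + 2*std
#            out='target_trends.xlsx')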
class CorrelationAnalyzer(Processor):
'''
Produces correlation analysis
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = True, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
drop_with_most_correlations=True, verbose=False, out_before=None, out_after=None, sep=';', cdict = None):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features.
For each highly correlated pair the algorithm chooses the less significant feature and adds it to the delete list.
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples, train sample will be checked)
drop_features: permission to delete correlated features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
drop_with_most_correlations: should the features with the highest number of correlations be excluded first (otherwise just with any number of correlations and the lowest gini)
verbose: flag for detailed output
out_before: file name for export of correlation table before feature exclusion (.csv and .xlsx types are supported)
out_after: file name for export of correlation table after feature exclusion (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
--------
Resulting Data or DataSamples object and the correlation table
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'method' : [method], 'out_before' : out_before, 'out_after' : out_after})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features == [] or features is None:
candidates = sample.features.copy()
else:
candidates = features.copy()
features_to_drop = []
correlations = sample.dataframe[candidates].corr(method = method)
cor_out=correlations.copy()
if cdict is None:
cdict = {'red' : ((0.0, 0.9, 0.9),
(0.5, 0.05, 0.05),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.8, 0.8),
(1.0, 0.0, 0.0)),
'blue' : ((0.0, 0.1, 0.1),
(0.5, 0.1, 0.1),
(1.0, 0.1, 0.1))}
#edited 21.08.2018 by <NAME> - added verbose variant, optimized feature dropping
# edited on Dec-06-18 by <NAME>: added png
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
if out_before is not None:
out_before_png = 'corr_before.png'
if out_before[-4:]=='.csv':
draw_corr.round(2).to_csv(out_before, sep = sep)
out_before_png = out_before[:-4] + '.png'
elif out_before[-5:]=='.xlsx' or out_before[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_before, engine='openpyxl', sheet_name='Correlation (before)')
out_before_png = out_before[:-5] + '.png' if out_before[-5:]=='.xlsx' else out_before[:-4] + '.png'
elif out_before[-4:]=='.png':
out_before_png = out_before
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
fig_before = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_before.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_before.savefig(out_before_png, bbox_inches='tight')
plt.close()
self.stats['out_before'] = out_before_png
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
to_check_correlation=True
while to_check_correlation:
to_check_correlation=False
corr_number={}
significantly_correlated={}
for var in correlations:
var_corr=correlations[var].apply(lambda x: abs(x))
var_corr=var_corr[(var_corr.index!=var) & (var_corr>threshold)].sort_values(ascending=False).copy()
corr_number[var]=var_corr.shape[0]
significantly_correlated[var]=str(var_corr.index.tolist())
if drop_with_most_correlations:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]==max({x:corr_number[x] for x in corr_number if x not in features_to_leave}.values()) and corr_number[x]>0 and x not in features_to_leave}
else:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]>0 and x not in features_to_leave}
if len(with_correlation)>0:
feature_to_drop=min(with_correlation, key=with_correlation.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high correlation with features: %(f)s (Gini=%(g)0.2f)' % {'v':feature_to_drop, 'f':significantly_correlated[feature_to_drop], 'g':with_correlation[feature_to_drop]})
correlations=correlations.drop(feature_to_drop,axis=1).drop(feature_to_drop,axis=0).copy()
to_check_correlation=True
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
out_after_png = 'corr_after.png'
if out_after is not None:
if out_after[-4:]=='.csv':
draw_corr.round(2).to_csv(out_after, sep = sep)
out_after_png = out_after[:-4] + '.png'
elif out_after[-5:]=='.xlsx' or out_after[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_after, engine='openpyxl', sheet_name='Correlation (after)')
out_after_png = out_after[:-5] + '.png' if out_after[-5:]=='.xlsx' else out_after[:-4] + '.png'
elif out_after[-4:]=='.png':
out_after_png = out_after
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
#sns.heatmap(draw_corr.round(2), annot = True, cmap = 'RdBu_r', cbar = False, center = 0).figure.savefig(out_after_png, bbox_inches='tight')
fig_after = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_after.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_after.savefig(out_after_png, bbox_inches='tight')
plt.close()
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
self.stats['out_after'] = out_after_png
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
if verbose:
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, cor_out
def find_correlated_groups(self, data, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
verbose=False, figsize=(12,12), corr_graph_type='connected'):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features and
returns groups of significantly correlated features
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples its train sample will be checked)
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be included in analysis
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
verbose: flag for detailed output
figsize: the size of correlation connections graph (printed if verbose)
corr_graph_type: type of connectivity to pursue in finding groups of correlated features
'connected' - groups are formed from features directly or indirectly connected by significant correlation
'complete' - groups are formed from features that are directly connected to each other by significant
correlation (each pair of features from a group will have a significant connection)
Returns
--------
a list of lists representing correlated group
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if features == [] or features is None:
candidates = [x for x in sample.features if x not in features_to_leave]
else:
candidates = [x for x in features if x not in features_to_leave]
correlations = sample.dataframe[candidates].corr(method = method)
if verbose:
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
display(draw_corr.round(2).style.applymap(color_digits,threshold_red=threshold))
G=nx.Graph()
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if correlations.loc[correlations.columns[i], correlations.columns[j]]>threshold:
G.add_nodes_from([correlations.columns[i], correlations.columns[j]])
G.add_edge(correlations.columns[i], correlations.columns[j], label=str(round(correlations.loc[correlations.columns[i], correlations.columns[j]],3)))
if verbose:
plt.figure(figsize=(figsize[0]*1.2, figsize[1]))
pos = nx.spring_layout(G, k=100)
edge_labels = nx.get_edge_attributes(G,'label')
nx.draw(G, pos, with_labels=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labels)
plt.margins(x=0.2)
plt.show()
correlated_groups=[]
if corr_graph_type=='connected':
for x in nx.connected_components(G):
correlated_groups.append(sorted(list(x)))
elif corr_graph_type=='complete':
for x in nx.find_cliques(G):
correlated_groups.append(sorted(x))
else:
print('Unknown correlation graph type. Please use "connected" or "complete". Return None.')
return None
return correlated_groups
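# Example (hypothetical usage sketch) for CorrelationAnalyzer: `samples` is an
# assumed DataSamples object whose train sample already has gini values calculated
# (see the check at the start of work()).
#
#   ca = CorrelationAnalyzer()
#   reduced, corr_table = ca.work(samples, threshold=0.6, method='spearman',
#                                 drop_with_most_correlations=True,
#                                 out_before='corr_before.xlsx', out_after='corr_after.xlsx')
#   groups = ca.find_correlated_groups(samples, threshold=0.6, corr_graph_type='connected')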
#---------------------------------------------------------------
class VIF(Processor):
'''
Calculates variance inflation factor for each feature
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = False, features=None, features_to_leave=None, threshold = 5,
drop_with_highest_VIF=True, verbose=True, out=None, sep=';'):
'''
Parameters
-----------
data: a Data or DataSamples object to check VIF on (in case of DataSamples its train sample will be checked)
drop_features: permission to delete excluded features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of VIF for feature to be excluded
drop_with_highest_VIF: should the features with the highest VIF be excluded first (otherwise just with the lowest gini)
verbose: flag for detailed output
out: file name for export of VIF values (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
---------
Data or DataSamples object without excluded features
A pandas DataFrame with VIF values on different iterations
'''
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'out' : [out]})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features is None:
features = sample.features.copy()
features_to_drop = []
to_check_VIF = True
vifs_df=pd.DataFrame(index=features)
iteration=-1
while to_check_VIF:
to_check_VIF = False
iteration=iteration+1
s = sample.target + ' ~ '
for f in features:
s = s + f + '+'
s = s[:-1]
# Break into left and right hand side; y and X
y_, X_ = dmatrices(formula_like=s, data=sample.dataframe, return_type="dataframe")
# For each Xi, calculate VIF
vifs = {features[i-1]:variance_inflation_factor(X_.values, i) for i in range(1, X_.shape[1])}
vifs_df=vifs_df.join(pd.DataFrame(vifs, index=[iteration]).T)  # one column of VIF values per iteration, features on the index
import numpy as np
import pandas as pd
import scipy
from sklearn import metrics
from FPMax import FPMax
from Apriori import Apriori
from MASPC import MASPC
import csv
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import linkage
from optbinning import ContinuousOptimalBinning
# pd.set_option('display.max_colwidth', -1)
# pd.options.display.max_columns = None
pd.options.display.width = 0
class MASPC_Engine():
def __init__(self, inputFileName, myMinAc=None, myMinOv=None,
myMinSup=None, myK=None,myContainsTemporal=False,
myAutoDiscretize=False,myDiscretizeStrategy=None,myQ=4):
print("inside maspc_engine constructor with "+inputFileName)
self.inputFileName = inputFileName
self.outputFileFolder = '/tmp/'
self.sortedInputFile = self.outputFileFolder+'sortedInputFile.csv'
self.myMinAc = myMinAc
self.myMinOv = myMinOv
self.myMinSup = myMinSup
self.myK = myK
self.myContainsTemporal = myContainsTemporal
self.myAutoDiscretize = myAutoDiscretize
self.myDiscretizeStrategy = myDiscretizeStrategy
self.myQ = myQ
# First thing we do is sort input file
self.__sortInputFile(self.inputFileName)
self.rtDataFrame = pd.read_csv(self.sortedInputFile, dtype=str)
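# Example (hypothetical usage sketch) for MASPC_Engine: the file name and threshold
# values below are illustrative assumptions. The input CSV is sorted first and then
# loaded as strings into self.rtDataFrame.
#
#   engine = MASPC_Engine('/tmp/input.csv', myMinAc=0.5, myMinOv=0.2,
#                         myMinSup=0.01, myK=5)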
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests  # used by catch_all() to mirror the client dev server in debug mode
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code == for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
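# Example: Convert pairs consecutive elements of a flat list into a dict,
# e.g. Convert(['a', 1, 'b', 2]) -> {'a': 1, 'b': 2}.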
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
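# Example: column_index returns the positional indices of the requested columns,
# e.g. for df with columns ['a', 'b', 'c'], column_index(df, ['c', 'a']) -> [2, 0].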
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(M), np.mean(X[Y==c, :], axis=0)*feature_importances)  # one entry per feature index
)
return out
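# Example (hypothetical usage sketch): per-class feature importance from a fitted
# random forest. `X`, `y` and the estimator below are illustrative assumptions.
#
#   rf = RandomForestClassifier(random_state=RANDOM_SEED).fit(X, y)
#   per_class = class_feature_importance(X.values, np.array(y), rf.feature_importances_)
#   # per_class[c][j] weights feature j's importance by its (scaled) mean value in class c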
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
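# The joblib Memory object memoizes GridSearchForModels on disk (./cachedir),
# keyed by its arguments, so a rerun with identical data and parameter grids
# reuses the cached results instead of re-fitting every model; stale results
# can be wiped with memory.clear().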
# calculating for all algorithms and models the performance and other results
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
    # extract the hyper-parameter settings of each trained model
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
    loop = 8 # the eight mean_test_* columns occupy positions 0-7; extra metrics are inserted after them
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
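# Layout relied on by the preprocessing helpers further down: each call above
# appends exactly nine entries to `results` (IDs, params, per-class metrics,
# per-feature accuracy, permutation importances, chi-squared feature scores,
# metrics, probabilities, predictions), and the algorithms are assumed to be
# trained in the order KNN, SVC, GausNB, MLP, LR, LDA, QDA, RF, ExtraT, AdaB,
# GradB, so algorithm k is read back at offset 9*k + position (for example
# allParametersPerformancePerModel[6 + 9*k] holds its metrics).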
# Sending each model's performance results to the frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
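# Remove drops duplicates and NaNs while casting numeric entries to float,
# e.g. (illustrative): Remove([0.1, 0.1, float('nan'), 'rbf']) -> [0.1, 'rbf']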
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
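# PreprocessingPred (above) soft-votes the selected models: each DataFrame
# column is one instance holding the per-model class-probability vectors, and
# zip(*content) averages them class-wise, e.g. two models answering [0.8, 0.2]
# and [0.6, 0.4] yield [0.7, 0.3] for that instance.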
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
listProbs = df_concatProbs.index.values.tolist()
deletedElements = 0
for index, element in enumerate(listProbs):
if element in ModelsList:
index = index - deletedElements
df_concatProbs = df_concatProbs.drop(df_concatProbs.index[index])
deletedElements = deletedElements + 1
df_concatProbsCleared = df_concatProbs
listIDsRemoved = df_concatProbsCleared.index.values.tolist()
predictionsAll = PreprocessingPred()
PredictionSpaceAll = FunMDS(predictionsAll)
PredictionSpaceAllComb = [list(a) for a in zip(PredictionSpaceAll[0], PredictionSpaceAll[1])]
predictionsSel = []
for column, content in df_concatProbsCleared.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictionsSel.append(el)
PredictionSpaceSel = FunMDS(predictionsSel)
PredictionSpaceSelComb = [list(a) for a in zip(PredictionSpaceSel[0], PredictionSpaceSel[1])]
mtx2PredFinal = []
    _, mtx2Pred, disparityPred = procrustes(PredictionSpaceAllComb, PredictionSpaceSelComb)
a1, b1 = zip(*mtx2Pred)
mtx2PredFinal.append(a1)
mtx2PredFinal.append(b1)
return [mtx2PredFinal,listIDsRemoved]
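# The Procrustes step above aligns the MDS projection of the reduced model set
# onto the projection of the full set, so removing models from the stack only
# shifts the affected points instead of re-orienting the whole predictions
# scatterplot.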
def PreprocessingParam():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_params = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_params
def PreprocessingParamSep():
dicKNN = json.loads(allParametersPerformancePerModel[1])
dicSVC = json.loads(allParametersPerformancePerModel[10])
dicGausNB = json.loads(allParametersPerformancePerModel[19])
dicMLP = json.loads(allParametersPerformancePerModel[28])
dicLR = json.loads(allParametersPerformancePerModel[37])
dicLDA = json.loads(allParametersPerformancePerModel[46])
dicQDA = json.loads(allParametersPerformancePerModel[55])
dicRF = json.loads(allParametersPerformancePerModel[64])
dicExtraT = json.loads(allParametersPerformancePerModel[73])
dicAdaB = json.loads(allParametersPerformancePerModel[82])
dicGradB = json.loads(allParametersPerformancePerModel[91])
dicKNN = dicKNN['params']
dicSVC = dicSVC['params']
dicGausNB = dicGausNB['params']
dicMLP = dicMLP['params']
dicLR = dicLR['params']
dicLDA = dicLDA['params']
dicQDA = dicQDA['params']
dicRF = dicRF['params']
dicExtraT = dicExtraT['params']
dicAdaB = dicAdaB['params']
dicGradB = dicGradB['params']
dicKNN = {int(k):v for k,v in dicKNN.items()}
dicSVC = {int(k):v for k,v in dicSVC.items()}
dicGausNB = {int(k):v for k,v in dicGausNB.items()}
dicMLP = {int(k):v for k,v in dicMLP.items()}
dicLR = {int(k):v for k,v in dicLR.items()}
dicLDA = {int(k):v for k,v in dicLDA.items()}
dicQDA = {int(k):v for k,v in dicQDA.items()}
dicRF = {int(k):v for k,v in dicRF.items()}
dicExtraT = {int(k):v for k,v in dicExtraT.items()}
dicAdaB = {int(k):v for k,v in dicAdaB.items()}
dicGradB = {int(k):v for k,v in dicGradB.items()}
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN = dfKNN.T
dfSVC = dfSVC.T
dfGausNB = dfGausNB.T
dfMLP = dfMLP.T
dfLR = dfLR.T
dfLDA = dfLDA.T
dfQDA = dfQDA.T
dfRF = dfRF.T
dfExtraT = dfExtraT.T
dfAdaB = dfAdaB.T
dfGradB = dfGradB.T
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
return [dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered]
def preProcessPerClassM():
dicKNN = json.loads(allParametersPerformancePerModel[2])
dicSVC = json.loads(allParametersPerformancePerModel[11])
dicGausNB = json.loads(allParametersPerformancePerModel[20])
dicMLP = json.loads(allParametersPerformancePerModel[29])
dicLR = json.loads(allParametersPerformancePerModel[38])
dicLDA = json.loads(allParametersPerformancePerModel[47])
dicQDA = json.loads(allParametersPerformancePerModel[56])
dicRF = json.loads(allParametersPerformancePerModel[65])
dicExtraT = json.loads(allParametersPerformancePerModel[74])
dicAdaB = json.loads(allParametersPerformancePerModel[83])
dicGradB = json.loads(allParametersPerformancePerModel[92])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatParams = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatParams
def preProcessFeatAcc():
dicKNN = json.loads(allParametersPerformancePerModel[3])
dicSVC = json.loads(allParametersPerformancePerModel[12])
dicGausNB = json.loads(allParametersPerformancePerModel[21])
dicMLP = json.loads(allParametersPerformancePerModel[30])
dicLR = json.loads(allParametersPerformancePerModel[39])
dicLDA = json.loads(allParametersPerformancePerModel[48])
dicQDA = json.loads(allParametersPerformancePerModel[57])
dicRF = json.loads(allParametersPerformancePerModel[66])
dicExtraT = json.loads(allParametersPerformancePerModel[75])
dicAdaB = json.loads(allParametersPerformancePerModel[84])
dicGradB = json.loads(allParametersPerformancePerModel[93])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_featAcc = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_featAcc
def preProcessPerm():
dicKNN = json.loads(allParametersPerformancePerModel[4])
dicSVC = json.loads(allParametersPerformancePerModel[13])
dicGausNB = json.loads(allParametersPerformancePerModel[22])
dicMLP = json.loads(allParametersPerformancePerModel[31])
dicLR = json.loads(allParametersPerformancePerModel[40])
dicLDA = json.loads(allParametersPerformancePerModel[49])
dicQDA = json.loads(allParametersPerformancePerModel[58])
dicRF = json.loads(allParametersPerformancePerModel[67])
dicExtraT = json.loads(allParametersPerformancePerModel[76])
dicAdaB = json.loads(allParametersPerformancePerModel[85])
dicGradB = json.loads(allParametersPerformancePerModel[94])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_perm = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_perm
def preProcessFeatSc():
dicKNN = json.loads(allParametersPerformancePerModel[5])
dfKNN = pd.DataFrame.from_dict(dicKNN)
return dfKNN
# TODO: this helper may be redundant and could potentially be removed
def preProcsumPerMetric(factors):
sumPerClassifier = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
loopThroughMetrics.loc[:, 'log_loss'] = 1 - loopThroughMetrics.loc[:, 'log_loss']
for row in loopThroughMetrics.iterrows():
rowSum = 0
name, values = row
for loop, elements in enumerate(values):
rowSum = elements*factors[loop] + rowSum
if sum(factors) == 0:
sumPerClassifier = 0
else:
sumPerClassifier.append(rowSum/sum(factors) * 100)
return sumPerClassifier
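# preProcsumPerMetric (above) collapses each model to a single 0-100 score: a
# weighted average of its metric columns using the user-chosen factors.
# Worked example with two metrics valued [0.9, 0.5] and weights [1, 1]:
# (0.9*1 + 0.5*1) / 2 * 100 = 70.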
def preProcMetricsAllAndSel():
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
global factors
metricsPerModelColl = []
metricsPerModelColl.append(loopThroughMetrics['mean_test_accuracy'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_micro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_macro'])
metricsPerModelColl.append(loopThroughMetrics['geometric_mean_score_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_precision_weighted'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_micro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_macro'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_recall_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f5_micro'])
metricsPerModelColl.append(loopThroughMetrics['f5_macro'])
metricsPerModelColl.append(loopThroughMetrics['f5_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f1_micro'])
metricsPerModelColl.append(loopThroughMetrics['f1_macro'])
metricsPerModelColl.append(loopThroughMetrics['f1_weighted'])
metricsPerModelColl.append(loopThroughMetrics['f2_micro'])
metricsPerModelColl.append(loopThroughMetrics['f2_macro'])
metricsPerModelColl.append(loopThroughMetrics['f2_weighted'])
metricsPerModelColl.append(loopThroughMetrics['matthews_corrcoef'])
metricsPerModelColl.append(loopThroughMetrics['mean_test_roc_auc_ovo_weighted'])
metricsPerModelColl.append(loopThroughMetrics['log_loss'])
    f=lambda a: (abs(a)+a)/2 # element-wise max(a, 0): clamps negative values to zero
for index, metric in enumerate(metricsPerModelColl):
if (index == 19):
metricsPerModelColl[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelColl[index] = ((1 - metric)*factors[index] ) * 100
else:
metricsPerModelColl[index] = (metric*factors[index]) * 100
metricsPerModelColl[index] = metricsPerModelColl[index].to_json()
return metricsPerModelColl
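# preProcMetricsAllAndSel (above) must keep its metrics in the same order as
# the factors weights sent by the frontend: every series is scaled by its
# weight and by 100, with two special cases - index 19 (Matthews corrcoef) is
# clamped to non-negative via (|a|+a)/2 and index 21 (normalized log loss) is
# flipped to 1 - value so that higher always means better.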
def preProceModels():
models = KNNModels + SVCModels + GausNBModels + MLPModels + LRModels + LDAModels + QDAModels + RFModels + ExtraTModels + AdaBModels + GradBModels
return models
def FunMDS (data):
mds = MDS(n_components=2, random_state=RANDOM_SEED)
XTransformed = mds.fit_transform(data).T
XTransformed = XTransformed.tolist()
return XTransformed
def FunTsne (data):
    tsne = TSNE(n_components=2, random_state=RANDOM_SEED).fit_transform(data)
    return tsne
def FunUMAP (data):
trans = umap.UMAP(n_neighbors=15, random_state=RANDOM_SEED).fit(data)
Xpos = trans.embedding_[:, 0].tolist()
Ypos = trans.embedding_[:, 1].tolist()
return [Xpos,Ypos]
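# Shape note for the three projections: FunMDS and FunUMAP return a pair of
# coordinate lists [[x1, x2, ...], [y1, y2, ...]], while FunTsne returns an
# (n_samples, 2) array that callers flatten with .tolist(), i.e. row-wise
# [x, y] pairs.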
def InitializeEnsemble():
XModels = PreprocessingMetrics()
global ModelSpaceMDS
global ModelSpaceTSNE
global allParametersPerformancePerModel
global impDataInst
XModels = XModels.fillna(0)
ModelSpaceMDS = FunMDS(XModels)
ModelSpaceTSNE = FunTsne(XModels)
ModelSpaceTSNE = ModelSpaceTSNE.tolist()
ModelSpaceUMAP = FunUMAP(XModels)
PredictionProbSel = PreprocessingPred()
PredictionSpaceMDS = FunMDS(PredictionProbSel)
PredictionSpaceTSNE = FunTsne(PredictionProbSel)
PredictionSpaceTSNE = PredictionSpaceTSNE.tolist()
PredictionSpaceUMAP = FunUMAP(PredictionProbSel)
ModelsIDs = preProceModels()
impDataInst = processDataInstance(ModelsIDs,allParametersPerformancePerModel)
callPreResults()
key = 0
EnsembleModel(ModelsIDs, key)
ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP)
def processDataInstance(ModelsIDs, allParametersPerformancePerModel):
dicKNN = json.loads(allParametersPerformancePerModel[8])
dicKNN = json.loads(dicKNN)
dicSVC = json.loads(allParametersPerformancePerModel[17])
dicSVC = json.loads(dicSVC)
dicGausNB = json.loads(allParametersPerformancePerModel[26])
dicGausNB = json.loads(dicGausNB)
dicMLP = json.loads(allParametersPerformancePerModel[35])
dicMLP = json.loads(dicMLP)
dicLR = json.loads(allParametersPerformancePerModel[44])
dicLR = json.loads(dicLR)
dicLDA = json.loads(allParametersPerformancePerModel[53])
dicLDA = json.loads(dicLDA)
dicQDA = json.loads(allParametersPerformancePerModel[62])
dicQDA = json.loads(dicQDA)
dicRF = json.loads(allParametersPerformancePerModel[71])
dicRF = json.loads(dicRF)
dicExtraT = json.loads(allParametersPerformancePerModel[80])
dicExtraT = json.loads(dicExtraT)
dicAdaB = json.loads(allParametersPerformancePerModel[89])
dicAdaB = json.loads(dicAdaB)
dicGradB = json.loads(allParametersPerformancePerModel[98])
dicGradB = json.loads(dicGradB)
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_connect = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
global yData
global filterActionFinal
global dataSpacePointsIDs
lengthDF = len(df_connect.columns)
if (filterActionFinal == 'compose'):
getList = []
for index, row in df_connect.iterrows():
yDataSelected = []
for column in row[dataSpacePointsIDs]:
yDataSelected.append(column)
storeMode = mode(yDataSelected)
getList.append(storeMode)
df_connect[str(lengthDF)] = getList
countCorrect = []
length = len(df_connect.index)
for index, element in enumerate(yData):
countTemp = 0
dfPart = df_connect[[str(index)]]
for indexdf, row in dfPart.iterrows():
if (int(row.values[0]) == int(element)):
countTemp += 1
countCorrect.append(1 - (countTemp/length))
return countCorrect
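# processDataInstance (above) yields one value per training instance: the share
# of the currently selected models that misclassify it (1 - correct/total); the
# result is shipped to the frontend as impDataInst.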
def ReturnResults(ModelSpaceMDS,ModelSpaceTSNE,ModelSpaceUMAP,PredictionSpaceMDS,PredictionSpaceTSNE,PredictionSpaceUMAP):
global Results
global AllTargets
Results = []
parametersGen = PreprocessingParam()
PerClassMetrics = preProcessPerClassM()
FeatureAccuracy = preProcessFeatAcc()
perm_imp_eli5PDCon = preProcessPerm()
featureScoresCon = preProcessFeatSc()
metricsPerModel = preProcMetricsAllAndSel()
sumPerClassifier = preProcsumPerMetric(factors)
ModelsIDs = preProceModels()
parametersGenPD = parametersGen.to_json(orient='records')
PerClassMetrics = PerClassMetrics.to_json(orient='records')
FeatureAccuracy = FeatureAccuracy.to_json(orient='records')
perm_imp_eli5PDCon = perm_imp_eli5PDCon.to_json(orient='records')
featureScoresCon = featureScoresCon.to_json(orient='records')
XDataJSONEntireSet = XData.to_json(orient='records')
XDataJSON = XData.columns.tolist()
Results.append(json.dumps(sumPerClassifier)) # Position: 0
Results.append(json.dumps(ModelSpaceMDS)) # Position: 1
Results.append(json.dumps(parametersGenPD)) # Position: 2
Results.append(PerClassMetrics) # Position: 3
Results.append(json.dumps(target_names)) # Position: 4
Results.append(FeatureAccuracy) # Position: 5
Results.append(json.dumps(XDataJSON)) # Position: 6
Results.append(0) # Position: 7
Results.append(json.dumps(PredictionSpaceMDS)) # Position: 8
Results.append(json.dumps(metricsPerModel)) # Position: 9
Results.append(perm_imp_eli5PDCon) # Position: 10
Results.append(featureScoresCon) # Position: 11
Results.append(json.dumps(ModelSpaceTSNE)) # Position: 12
Results.append(json.dumps(ModelsIDs)) # Position: 13
Results.append(json.dumps(XDataJSONEntireSet)) # Position: 14
Results.append(json.dumps(yData)) # Position: 15
Results.append(json.dumps(AllTargets)) # Position: 16
Results.append(json.dumps(ModelSpaceUMAP)) # Position: 17
Results.append(json.dumps(PredictionSpaceTSNE)) # Position: 18
Results.append(json.dumps(PredictionSpaceUMAP)) # Position: 19
return Results
# Sending the overview classifiers' results to be visualized as a scatterplot
@app.route('/data/PlotClassifiers', methods=["GET", "POST"])
def SendToPlot():
while (len(DataResultsRaw) != DataRawLength):
pass
InitializeEnsemble()
response = {
'OverviewResults': Results
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRemoveFromStack', methods=["GET", "POST"])
def RetrieveSelClassifiersIDandRemoveFromStack():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
PredictionProbSelUpdate = PreprocessingPredUpdate(ClassifierIDsList)
global resultsUpdatePredictionSpace
resultsUpdatePredictionSpace = []
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[0])) # Position: 0
resultsUpdatePredictionSpace.append(json.dumps(PredictionProbSelUpdate[1]))
key = 3
EnsembleModel(ClassifierIDsList, key)
return 'Everything Okay'
# Sending the updated prediction space results back to the frontend
@app.route('/data/UpdatePredictionsSpace', methods=["GET", "POST"])
def SendPredBacktobeUpdated():
response = {
'UpdatePredictions': resultsUpdatePredictionSpace
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoin', methods=["GET", "POST"])
def RetrieveSelClassifiersID():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
#ComputeMetricsForSel(ClassifierIDsList)
ClassifierIDCleaned = json.loads(ClassifierIDsList)
global keySpecInternal
keySpecInternal = 1
keySpecInternal = ClassifierIDCleaned['keyNow']
EnsembleModel(ClassifierIDsList, 1)
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelPoinLocally', methods=["GET", "POST"])
def RetrieveSelClassifiersIDLocally():
ClassifierIDsList = request.get_data().decode('utf8').replace("'", '"')
ComputeMetricsForSel(ClassifierIDsList)
return 'Everything Okay'
def ComputeMetricsForSel(Models):
Models = json.loads(Models)
MetricsAlltoSel = PreprocessingMetrics()
listofModels = []
for loop in Models['ClassifiersList']:
listofModels.append(loop)
MetricsAlltoSel = MetricsAlltoSel.loc[listofModels,:]
global metricsPerModelCollSel
global factors
metricsPerModelCollSel = []
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_accuracy'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['geometric_mean_score_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_precision_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_recall_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f5_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f1_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_micro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_macro'])
metricsPerModelCollSel.append(MetricsAlltoSel['f2_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['matthews_corrcoef'])
metricsPerModelCollSel.append(MetricsAlltoSel['mean_test_roc_auc_ovo_weighted'])
metricsPerModelCollSel.append(MetricsAlltoSel['log_loss'])
f=lambda a: (abs(a)+a)/2
for index, metric in enumerate(metricsPerModelCollSel):
if (index == 19):
metricsPerModelCollSel[index] = ((f(metric))*factors[index]) * 100
elif (index == 21):
metricsPerModelCollSel[index] = (1 - metric)*factors[index] * 100
else:
metricsPerModelCollSel[index] = metric*factors[index] * 100
metricsPerModelCollSel[index] = metricsPerModelCollSel[index].to_json()
return 'okay'
# function to get unique values
def unique(list1):
    # initialize an empty list
    unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
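# unique keeps the first occurrence of each element, preserving order,
# e.g. (illustrative): unique([2, 1, 2, 3]) -> [2, 1, 3]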
# Sending the selected models' metrics to be visualized as a bar chart
@app.route('/data/BarChartSelectedModels', methods=["GET", "POST"])
def SendToUpdateBarChart():
response = {
'SelectedMetricsForModels': metricsPerModelCollSel
}
return jsonify(response)
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestDataPoint', methods=["GET", "POST"])
def RetrieveSelDataPoints():
DataPointsSel = request.get_data().decode('utf8').replace("'", '"')
DataPointsSelClear = json.loads(DataPointsSel)
listofDataPoints = []
for loop in DataPointsSelClear['DataPointsSel']:
temp = [int(s) for s in re.findall(r'\b\d+\b', loop)]
listofDataPoints.append(temp[0])
global algorithmsList
global resultsMetrics
resultsMetrics = []
df_concatMetrics = []
metricsSelList = []
paramsListSepPD = []
paramsListSepPD = PreprocessingParamSep()
paramsListSeptoDicKNN = paramsListSepPD[0].to_dict(orient='list')
paramsListSeptoDicSVC = paramsListSepPD[1].to_dict(orient='list')
paramsListSeptoDicGausNB = paramsListSepPD[2].to_dict(orient='list')
paramsListSeptoDicMLP = paramsListSepPD[3].to_dict(orient='list')
paramsListSeptoDicLR = paramsListSepPD[4].to_dict(orient='list')
paramsListSeptoDicLDA = paramsListSepPD[5].to_dict(orient='list')
paramsListSeptoDicQDA = paramsListSepPD[6].to_dict(orient='list')
paramsListSeptoDicRF = paramsListSepPD[7].to_dict(orient='list')
paramsListSeptoDicExtraT = paramsListSepPD[8].to_dict(orient='list')
paramsListSeptoDicAdaB = paramsListSepPD[9].to_dict(orient='list')
paramsListSeptoDicGradB = paramsListSepPD[10].to_dict(orient='list')
RetrieveParamsCleared = {}
RetrieveParamsClearedListKNN = []
for key, value in paramsListSeptoDicKNN.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListKNN.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListSVC = []
for key, value in paramsListSeptoDicSVC.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListSVC.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGausNB = []
for key, value in paramsListSeptoDicGausNB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGausNB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListMLP = []
for key, value in paramsListSeptoDicMLP.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListMLP.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLR = []
for key, value in paramsListSeptoDicLR.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLR.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListLDA = []
for key, value in paramsListSeptoDicLDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListLDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListQDA = []
for key, value in paramsListSeptoDicQDA.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListQDA.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListRF = []
for key, value in paramsListSeptoDicRF.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListRF.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListExtraT = []
for key, value in paramsListSeptoDicExtraT.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListExtraT.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListAdaB = []
for key, value in paramsListSeptoDicAdaB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListAdaB.append(RetrieveParamsCleared)
RetrieveParamsCleared = {}
RetrieveParamsClearedListGradB = []
for key, value in paramsListSeptoDicGradB.items():
withoutDuplicates = Remove(value)
RetrieveParamsCleared[key] = withoutDuplicates
RetrieveParamsClearedListGradB.append(RetrieveParamsCleared)
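# If no hyperparameter values were collected for an algorithm, clear its grid so that algorithm is skipped below.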
if (len(paramsListSeptoDicKNN['n_neighbors']) == 0):
RetrieveParamsClearedListKNN = []
if (len(paramsListSeptoDicSVC['C']) == 0):
RetrieveParamsClearedListSVC = []
if (len(paramsListSeptoDicGausNB['var_smoothing']) == 0):
RetrieveParamsClearedListGausNB = []
if (len(paramsListSeptoDicMLP['alpha']) == 0):
RetrieveParamsClearedListMLP = []
if (len(paramsListSeptoDicLR['C']) == 0):
RetrieveParamsClearedListLR = []
if (len(paramsListSeptoDicLDA['shrinkage']) == 0):
RetrieveParamsClearedListLDA = []
if (len(paramsListSeptoDicQDA['reg_param']) == 0):
RetrieveParamsClearedListQDA = []
if (len(paramsListSeptoDicRF['n_estimators']) == 0):
RetrieveParamsClearedListRF = []
if (len(paramsListSeptoDicExtraT['n_estimators']) == 0):
RetrieveParamsClearedListExtraT = []
if (len(paramsListSeptoDicAdaB['n_estimators']) == 0):
RetrieveParamsClearedListAdaB = []
if (len(paramsListSeptoDicGradB['n_estimators']) == 0):
RetrieveParamsClearedListGradB = []
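# For each requested algorithm, select the estimator, its cleaned hyperparameter grid, and the model-ID offset, then run the grid search.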
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = RetrieveParamsClearedListKNN
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = RetrieveParamsClearedListSVC
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = RetrieveParamsClearedListGausNB
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListMLP
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListLR
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = RetrieveParamsClearedListLDA
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = RetrieveParamsClearedListQDA
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListRF
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListExtraT
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListAdaB
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = RetrieveParamsClearedListGradB
AlgorithmsIDsEnd = GradBModelsCount
metricsSelList = GridSearchSel(clf, params, factors, AlgorithmsIDsEnd, listofDataPoints, crossValidation)
if all(len(metricsSelList[i]) != 0 for i in range(11)):
dicKNN = json.loads(metricsSelList[0])
dfKNN = pd.DataFrame.from_dict(dicKNN)
parametersSelDataPD = parametersSelData[0].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[0], paramsListSepPD[0]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfKNNCleared = dfKNN
else:
dfKNNCleared = dfKNN.drop(dfKNN.index[set_diff_df])
dicSVC = json.loads(metricsSelList[1])
dfSVC = pd.DataFrame.from_dict(dicSVC)
parametersSelDataPD = parametersSelData[1].apply(pd.Series)
set_diff_df = pd.concat([parametersSelDataPD, paramsListSepPD[1], paramsListSepPD[1]]).drop_duplicates(keep=False)
set_diff_df = set_diff_df.index.tolist()
if (len(set_diff_df) == 0):
dfSVCCleared = dfSVC
else:
dfSVCCleared = dfSVC.drop(dfSVC.index[set_diff_df])
dicGausNB = json.loads(metricsSelList[2])
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
from vtkplotter import ProgressBar, shapes, merge, load
from vtkplotter.mesh import Mesh as Actor
from morphapi.morphology.morphology import Neuron
import brainrender
from brainrender.Utils.data_io import load_mesh_from_file, load_json
from brainrender.Utils.data_manipulation import get_coords, flatten_list, is_any_item_in_list
from brainrender.morphology.utils import edit_neurons, get_neuron_actors_with_morphapi
from brainrender import STREAMLINES_RESOLUTION, INJECTION_VOLUME_SIZE
from brainrender.Utils.webqueries import request
from brainrender import *
from brainrender.Utils import actors_funcs
from brainrender.colors import _mapscales_cmaps, makePalette, get_random_colors, getColor, colors, colorMap, check_colors
from brainrender.colors import get_n_shades_of
from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache
from allensdk.api.queries.ontologies_api import OntologiesApi
from allensdk.api.queries.reference_space_api import ReferenceSpaceApi
from allensdk.api.queries.mouse_connectivity_api import MouseConnectivityApi
from allensdk.api.queries.tree_search_api import TreeSearchApi
from allensdk.core.reference_space_cache import ReferenceSpaceCache
from brainrender.atlases.base import Atlas
class ABA(Atlas):
"""
This class handles interaction with the Allen Brain Atlas datasets and APIs to get structure trees,
experimental metadata and results, tractography data etc.
"""
ignore_regions = ['retina', 'brain', 'fiber tracts', 'grey'] # ignored when rendering
# useful vars for analysis
excluded_regions = ["fiber tracts"]
resolution = 25
_root_bounds = [[-17, 13193],
[ 134, 7564],
[486, 10891]]
_root_midpoint = [np.mean([-17, 13193]),
np.mean([134, 7564]),
np.mean([486, 10891])]
atlas_name = "ABA"
mesh_format = 'obj'
base_url = "https://neuroinformatics.nl/HBP/allen-connectivity-viewer/json/streamlines_NNN.json.gz"
# Used for streamlines
def __init__(self, base_dir=None, **kwargs):
"""
Set up file paths and Allen SDKs
:param base_dir: path to directory to use for saving data (default value None)
:param kwargs: can be used to pass path to individual data folders. See brainrender/Utils/paths_manager.py
"""
Atlas.__init__(self, base_dir=base_dir, **kwargs)
self.meshes_folder = self.mouse_meshes # where the .obj mesh for each region is saved
# get mouse connectivity cache and structure tree
self.mcc = MouseConnectivityCache(manifest_file=os.path.join(self.mouse_connectivity_cache, "manifest.json"))
self.structure_tree = self.mcc.get_structure_tree()
# get ontologies API and brain structures sets
self.oapi = OntologiesApi()
self.get_structures_sets()
# get reference space
self.space = ReferenceSpaceApi()
self.spacecache = ReferenceSpaceCache(
manifest=os.path.join(self.annotated_volume_fld, "manifest.json"), # downloaded files are stored relative to here
resolution=self.resolution,
reference_space_key="annotation/ccf_2017" # use the latest version of the CCF
)
self.annotated_volume, _ = self.spacecache.get_annotation_volume()
# mouse connectivity API [used for tractography]
self.mca = MouseConnectivityApi()
# Get tree search api
self.tree_search = TreeSearchApi()
# Store all regions metadata [If there's internet connection]
if self.other_sets is not None:
self.regions = self.other_sets["Structures whose surfaces are represented by a precomputed mesh"].sort_values('acronym')
self.region_acronyms = list(self.other_sets["Structures whose surfaces are represented by a precomputed mesh"].sort_values(
'acronym').acronym.values)
# ---------------------------------------------------------------------------- #
# Methods to support Scene creation #
# ---------------------------------------------------------------------------- #
"""
These methods are used by brainrender.scene to populate a scene using
the Allen brain atlas meshes. They overwrite methods of the base atlas class
"""
# ------------------------- Adding elements to scene ------------------------- #
def get_brain_regions(self, brain_regions, VIP_regions=None, VIP_color=None,
add_labels=False,
colors=None, use_original_color=True,
alpha=None, hemisphere=None, verbose=False, **kwargs):
"""
Gets brain regions meshes for rendering
Many parameters can be passed to specify how the regions should be rendered.
To treat a subset of the rendered regions, specify which regions are VIP.
Use the kwargs to specify more details on how the regions should be rendered (e.g. wireframe look)
:param brain_regions: str list of acronyms of brain regions
:param VIP_regions: if a list of brain regions are passed, these are rendered differently compared to those in brain_regions (Default value = None)
:param VIP_color: if passed, this color is used for the VIP regions (Default value = None)
:param colors: str, color of rendered brain regions (Default value = None)
:param use_original_color: bool, if True, the Allen's default color for the region is used. (Default value = True)
:param alpha: float, transparency of the rendered brain regions (Default value = None)
:param hemisphere: str (Default value = None)
:param add_labels: bool (default False). If true a label is added to each region's actor. The label is visible when hovering the mouse over the actor
:param **kwargs: used to determine a number of things, including the look and location of labels from scene.add_labels
"""
# Check that the atlas has brain regions data
if self.region_acronyms is None:
print(f"The atlas {self.atlas_name} has no brain regions data")
return
# Parse arguments
if VIP_regions is None:
VIP_regions = brainrender.DEFAULT_VIP_REGIONS
if VIP_color is None:
VIP_color = brainrender.DEFAULT_VIP_COLOR
if alpha is None:
_alpha = brainrender.DEFAULT_STRUCTURE_ALPHA
else: _alpha = alpha
# check that we have a list
if not isinstance(brain_regions, list):
brain_regions = [brain_regions]
# check the colors input is correct
if colors is not None:
if isinstance(colors[0], (list, tuple)):
if not len(colors) == len(brain_regions):
raise ValueError("when passing colors as a list, the number of colors must match the number of brain regions")
for col in colors:
if not check_colors(col): raise ValueError("Invalid colors in input: {}".format(col))
else:
if not check_colors(colors): raise ValueError("Invalid colors in input: {}".format(colors))
colors = [colors for i in range(len(brain_regions))]
# loop over all brain regions
actors = {}
for i, region in enumerate(brain_regions):
self._check_valid_region_arg(region)
if region in self.ignore_regions: continue
if verbose: print("Rendering: ({})".format(region))
# get the structure and check if we need to download the object file
if region not in self.region_acronyms:
print(f"The region {region} doesn't seem to belong to the atlas being used: {self.atlas_name}. Skipping")
continue
obj_file = os.path.join(self.meshes_folder, "{}.{}".format(region, self.mesh_format))
if not self._check_obj_file(region, obj_file):
print("Could not render {}, maybe we couldn't get the mesh?".format(region))
continue
# check which color to assign to the brain region
if use_original_color:
color = [x/255 for x in self.get_region_color(region)]
else:
if region in VIP_regions:
color = VIP_color
else:
if colors is None:
color = brainrender.DEFAULT_STRUCTURE_COLOR
elif isinstance(colors, list):
color = colors[i]
else:
color = colors
if region in VIP_regions:
alpha = 1
else:
alpha = _alpha
# Load the object file as a mesh and store the actor
if hemisphere is not None:
if hemisphere.lower() == "left" or hemisphere.lower() == "right":
obj = self.get_region_unilateral(region, hemisphere=hemisphere, color=color, alpha=alpha)
else:
raise ValueError(f'Invalid hemisphere argument: {hemisphere}')
else:
obj = load(obj_file, c=color, alpha=alpha)
if obj is not None:
actors_funcs.edit_actor(obj, **kwargs)
actors[region] = obj
else:
print(f"Something went wrong while loading mesh data for {region}")
return actors
@staticmethod # static method because this should inherit from scene
def add_neurons(self, neurons, color=None, display_axon=True, display_dendrites=True,
alpha=1, neurite_radius=None):
"""
Adds rendered morphological data of neurons reconstructions downloaded from the
Mouse Light project at Janelia (or other sources).
Accepts neurons argument as:
- file(s) with morphological data
- vtkplotter mesh actor(s) of entire neurons reconstructions
- dictionary or list of dictionary with actors for different neuron parts
:param self: instance of brainrender Scene to use to render neurons
:param neurons: str, list, dict. File(s) with neurons data or list of rendered neurons.
:param display_axon, display_dendrites: if set to False the corresponding neurite is not rendered
:param color: default None. Can be:
- None: each neuron is given a random color
- color: rbg, hex etc. If a single color is passed all neurons will have that color
- cmap: str with name of a colormap: neurons are colored based on their sequential order and cmap
- dict: a dictionary specifying a color for soma, dendrites and axon actors, will be the same for all neurons
- list: a list of length = number of neurons with either a single color for each neuron
or a dictionary of colors for each neuron
:param alpha: float in range 0,1. Neurons transparency
:param neurite_radius: float > 0 , radius of tube actor representing neurites
"""
if not isinstance(neurons, (list, tuple)):
neurons = [neurons]
# ------------------------------ Prepare colors ------------------------------ #
N = len(neurons)
colors = dict(
soma = None,
axon = None,
dendrites = None,
)
# If no color is passed, get random colors
if color is None:
cols = get_random_colors(N)
colors = dict(
soma = cols.copy(),
axon = cols.copy(),
dendrites = cols.copy(),)
else:
if isinstance(color, str):
# Deal with a a cmap being passed
if color in _mapscales_cmaps:
cols = [colorMap(n, name=color, vmin=-2, vmax=N+2) for n in np.arange(N)]
colors = dict(
soma = cols.copy(),
axon = cols.copy(),
dendrites = cols.copy(),)
else:
# Deal with a single color being passed
cols = [getColor(color) for n in np.arange(N)]
colors = dict(
soma = cols.copy(),
axon = cols.copy(),
dendrites = cols.copy(),)
elif isinstance(color, dict):
# Deal with a dictionary with color for each component
if not 'soma' in color.keys():
raise ValueError(f"When passing a dictionary as color argument, \
soma should be one of the keys: {color}")
dendrites_color = color.pop('dendrites', color['soma'])
axon_color = color.pop('axon', color['soma'])
colors = dict(
soma = [color['soma'] for n in np.arange(N)],
axon = [axon_color for n in np.arange(N)],
dendrites = [dendrites_color for n in np.arange(N)],)
elif isinstance(color, (list, tuple)):
# Check that the list content makes sense
if len(color) != N:
raise ValueError(f"When passing a list of color arguments, the list length"+
f" ({len(color)}) should match the number of neurons ({N}).")
if len(set([type(c) for c in color])) > 1:
raise ValueError(f"When passing a list of color arguments, all list elements"+
" should have the same type (e.g. str or dict)")
if isinstance(color[0], dict):
# Deal with a list of dictionaries
soma_colors, dendrites_colors, axon_colors = [], [], []
for col in color:
if not 'soma' in col.keys():
raise ValueError(f"When passing a dictionary as col argument, \
soma should be one of the keys: {col}")
dendrites_colors.append(col.pop('dendrites', col['soma']))
axon_colors.append(col.pop('axon', col['soma']))
soma_colors.append(col['soma'])
colors = dict(
soma = soma_colors,
axon = axon_colors,
dendrites = dendrites_colors,)
else:
# Deal with a list of colors
colors = dict(
soma = color.copy(),
axon = color.copy(),
dendrites = color.copy(),)
else:
raise ValueError(f"Color argument passed is not valid. Should be a \
str, dict, list or None, not {type(color)}:{color}")
# Check colors, if everything went well we should have N colors per entry
for k,v in colors.items():
if len(v) != N:
raise ValueError(f"Something went wrong while preparing colors. Not all \
entries have right length. We got: {colors}")
# ---------------------------------- Render ---------------------------------- #
_neurons_actors = []
for neuron in neurons:
neuron_actors = {'soma':None, 'dendrites':None, 'axon': None}
# Deal with neuron as filepath
if isinstance(neuron, str):
if os.path.isfile(neuron):
if neuron.endswith('.swc'):
neuron_actors, _ = get_neuron_actors_with_morphapi(swcfile=neuron, neurite_radius=neurite_radius)
else:
raise NotImplementedError('Currently we can only parse morphological reconstructions from swc files')
else:
raise ValueError(f"Passed neuron {neuron} is not a valid input. Maybe the file doesn't exist?")
# Deal with neuron as single actor
elif isinstance(neuron, Actor):
# A single actor was passed, maybe it's the entire neuron
neuron_actors['soma'] = neuron # store it as soma anyway
pass
# Deal with neuron as dictionary of actor
elif isinstance(neuron, dict):
neuron_actors['soma'] = neuron.pop('soma', None)
neuron_actors['axon'] = neuron.pop('axon', None)
# Get dendrites actors
if 'apical_dendrites' in neuron.keys() or 'basal_dendrites' in neuron.keys():
if 'apical_dendrites' not in neuron.keys():
neuron_actors['dendrites'] = neuron['basal_dendrites']
elif 'basal_dendrites' not in neuron.keys():
neuron_actors['dendrites'] = neuron['apical_dendrites']
else:
neuron_actors['dendrites'] = merge(neuron['apical_dendrites'], neuron['basal_dendrites'])
else:
neuron_actors['dendrites'] = neuron.pop('dendrites', None)
# Deal with neuron as instance of Neuron from morphapi
elif isinstance(neuron, Neuron):
neuron_actors, _ = get_neuron_actors_with_morphapi(neuron=neuron)
# Deal with other inputs
else:
raise ValueError(f"Passed neuron {neuron} is not a valid input")
# Check that we don't have anything weird in neuron_actors
for key, act in neuron_actors.items():
if act is not None:
if not isinstance(act, Actor):
raise ValueError(f"Neuron actor {key} is {type(act)} but should be a vtkplotter Mesh. Not: {act}")
if not display_axon:
neuron_actors['axon'] = None
if not display_dendrites:
neuron_actors['dendrites'] = None
_neurons_actors.append(neuron_actors)
# Color actors
for n, neuron in enumerate(_neurons_actors):
if neuron['axon'] is not None:
neuron['axon'].c(colors['axon'][n])
neuron['soma'].c(colors['soma'][n])
if neuron['dendrites'] is not None:
neuron['dendrites'].c(colors['dendrites'][n])
# Add to actors storage
self.actors["neurons"].extend(_neurons_actors)
# Return
if len(_neurons_actors) == 1:
return _neurons_actors[0]
elif not _neurons_actors:
return None
else:
return _neurons_actors
@staticmethod
def add_tractography(self, tractography, color=None, display_injection_structure=False,
display_onlyVIP_injection_structure=False, color_by="manual", others_alpha=1, verbose=True,
VIP_regions=[], VIP_color=None, others_color="white", include_all_inj_regions=False,
extract_region_from_inj_coords=False, display_injection_volume=True):
"""
Renders tractography data and adds it to the scene. A subset of tractography data can receive special treatment using the VIP regions argument:
if the injection site for the tractography data is in a VIP region, it is colored differently.
:param tractography: list of dictionaries with tractography data
:param color: color of rendered tractography data
:param display_injection_structure: Bool, if True the injection structure is rendered (Default value = False)
:param display_onlyVIP_injection_structure: bool if true displays the injection structure only for VIP regions (Default value = False)
:param color_by: str, specifies which criteria to use to color the tractography (Default value = "manual")
:param others_alpha: float (Default value = 1)
:param verbose: bool (Default value = True)
:param VIP_regions: list of brain regions with VIP treatment (Default value = [])
:param VIP_color: str, color to use for VIP data (Default value = None)
:param others_color: str, color for not VIP data (Default value = "white")
:param include_all_inj_regions: bool (Default value = False)
:param extract_region_from_inj_coords: bool (Default value = False)
:param display_injection_volume: bool, if True a sphere is added to display the injection coordinates and volume (Default value = True)
"""
# check argument
if not isinstance(tractography, list):
if isinstance(tractography, dict):
tractography = [tractography]
else:
raise ValueError("the 'tractography' variable passed must be a list of dictionaries")
else:
if not isinstance(tractography[0], dict):
raise ValueError("the 'tractography' variable passed must be a list of dictionaries")
if not isinstance(VIP_regions, list):
raise ValueError("VIP_regions should be a list of acronyms")
# check coloring mode used and prepare a list COLORS to use for coloring stuff
if color_by == "manual":
# check color argument
if color is None:
color = TRACT_DEFAULT_COLOR
COLORS = [color for i in range(len(tractography))]
elif isinstance(color, list):
if not len(color) == len(tractography):
raise ValueError("If a list of colors is passed, it must have the same number of items as the number of tractography traces")
else:
for col in color:
if not check_colors(col): raise ValueError("Color variable passed to tractography is invalid: {}".format(col))
COLORS = color
else:
if not check_colors(color):
raise ValueError("Color variable passed to tractography is invalid: {}".format(color))
else:
COLORS = [color for i in range(len(tractography))]
elif color_by == "region":
COLORS = [self.atlas.get_region_color(t['structure-abbrev']) for t in tractography]
elif color_by == "target_region":
if VIP_color is not None:
if not check_colors(VIP_color) or not check_colors(others_color):
raise ValueError("Invalid VIP or other color passed")
try:
if include_all_inj_regions:
COLORS = [VIP_color if is_any_item_in_list( [x['abbreviation'] for x in t['injection-structures']], VIP_regions)\
else others_color for t in tractography]
else:
COLORS = [VIP_color if t['structure-abbrev'] in VIP_regions else others_color for t in tractography]
except:
raise ValueError("Something went wrong while getting colors for tractography")
else:
COLORS = [self.atlas.get_region_color(t['structure-abbrev']) if t['structure-abbrev'] in VIP_regions else others_color for t in tractography]
else:
raise ValueError("Unrecognised 'color_by' argument {}".format(color_by))
# add actors to represent tractography data
actors, structures_acronyms = [], []
if VERBOSE and verbose:
print("Structures found to be projecting to target: ")
# Loop over injection experiments
for i, (t, color) in enumerate(zip(tractography, COLORS)):
# Use allen metadata
if include_all_inj_regions:
inj_structures = [x['abbreviation'] for x in t['injection-structures']]
else:
inj_structures = [self.atlas.get_structure_parent(t['structure-abbrev'])['acronym']]
# show brain structures in which injections happened
if display_injection_structure:
if not is_any_item_in_list(inj_structures, list(self.actors['regions'].keys())):
if display_onlyVIP_injection_structure and is_any_item_in_list(inj_structures, VIP_regions):
self.add_brain_regions([t['structure-abbrev']], colors=color)
elif not display_onlyVIP_injection_structure:
self.add_brain_regions([t['structure-abbrev']], colors=color)
if VERBOSE and verbose and not is_any_item_in_list(inj_structures, structures_acronyms):
print(" -- ({})".format(t['structure-abbrev']))
structures_acronyms.append(t['structure-abbrev'])
# get tractography points and represent as list
if color_by == "target_region" and not is_any_item_in_list(inj_structures, VIP_regions):
alpha = others_alpha
else:
alpha = TRACTO_ALPHA
if alpha == 0:
continue # skip transparent ones
# check if we need to manually check injection coords
if extract_region_from_inj_coords:
try:
region = self.atlas.get_structure_from_coordinates(t['injection-coordinates'],
just_acronym=False)
if region is None: continue
inj_structures = [self.atlas.get_structure_parent(region['acronym'])['acronym']]
except:
raise ValueError(self.atlas.get_structure_from_coordinates(t['injection-coordinates'],
just_acronym=False))
if inj_structures is None: continue
elif isinstance(extract_region_from_inj_coords, list):
# check if injection coord are in one of the brain regions in list, otherwise skip
if not is_any_item_in_list(inj_structures, extract_region_from_inj_coords):
continue
# represent injection site as sphere
if display_injection_volume:
actors.append(shapes.Sphere(pos=t['injection-coordinates'],
c=color, r=INJECTION_VOLUME_SIZE*t['injection-volume'], alpha=TRACTO_ALPHA))
points = [p['coord'] for p in t['path']]
actors.append(shapes.Tube(points, r=TRACTO_RADIUS, c=color, alpha=alpha, res=TRACTO_RES))
self.actors['tracts'].extend(actors)
@staticmethod
def parse_streamline(*args, filepath=None, data=None, show_injection_site=True, color='ivory', alpha=.8, radius=10, **kwargs):
"""
Given a path to a .json file with streamline data (or the data themselves), render the streamline as tubes actors.
Either filepath or data should be passed
:param filepath: str, optional. Path to .json file with streamline data (Default value = None)
:param data: pandas.DataFrame, optional. DataFrame with streamline data. (Default value = None)
:param color: str color of the streamlines (Default value = 'ivory')
:param alpha: float transparency of the streamlines (Default value = .8)
:param radius: int radius of the streamlines actor (Default value = 10)
:param show_injection_site: bool, if True spheres are used to render the injection volume (Default value = True)
:param *args:
:param **kwargs:
"""
if filepath is not None and data is None:
data = load_json(filepath)
# data = {k:{int(k2):v2 for k2, v2 in v.items()} for k,v in data.items()}
elif filepath is None and data is not None:
pass
else:
raise ValueError("Need to pass either a filepath or data argument to parse_streamline")
# create actors for streamlines
lines = []
if len(data['lines']) == 1:
lines_data = data['lines'][0]
else:
lines_data = data['lines']
for line in lines_data:
points = [[l['x'], l['y'], l['z']] for l in line]
lines.append(shapes.Tube(points, r=radius, c=color, alpha=alpha, res=STREAMLINES_RESOLUTION))
coords = []
if show_injection_site:
if len(data['injection_sites']) == 1:
injection_data = data['injection_sites'][0]
else:
injection_data = data['injection_sites']
for inj in injection_data:
coords.append(list(inj.values()))
spheres = [shapes.Spheres(coords, r=INJECTION_VOLUME_SIZE)]
else:
spheres = []
merged = merge(*lines, *spheres)
merged.color(color)
merged.alpha(alpha)
return [merged]
@staticmethod
def add_streamlines(self, sl_file, *args, colorby=None, color_each=False, **kwargs):
"""
Render streamline data downloaded from https://neuroinformatics.nl/HBP/allen-connectivity-viewer/streamline-downloader.html
:param sl_file: path to JSON file with streamlines data [or list of files]
:param colorby: str, criteria for how to color the streamline data (Default value = None)
:param color_each: bool, if True, the streamlines for each injection is colored differently (Default value = False)
:param *args:
:param **kwargs:
"""
color = None
if not color_each:
if colorby is not None:
try:
color = self.structure_tree.get_structures_by_acronym([colorby])[0]['rgb_triplet']
if "color" in kwargs.keys():
del kwargs["color"]
except:
raise ValueError("Could not extract color for region: {}".format(colorby))
else:
if colorby is not None:
color = kwargs.pop("color", None)
try:
get_n_shades_of(color, 1)
except:
raise ValueError("Invalid color argument: {}".format(color))
if isinstance(sl_file, list):
if isinstance(sl_file[0], (str, pd.DataFrame)): # we have a list of files to add
for slf in tqdm(sl_file):
if not color_each:
if color is not None:
if isinstance(slf, str):
streamlines = self.atlas.parse_streamline(filepath=slf, *args, color=color, **kwargs)
else:
streamlines = self.atlas.parse_streamline(data=slf, *args, color=color, **kwargs)
else:
if isinstance(slf, str):
streamlines = self.atlas.parse_streamline(filepath=slf, *args, **kwargs)
else:
streamlines = self.atlas.parse_streamline(data=slf, *args, **kwargs)
else:
if color is not None:
col = get_n_shades_of(color, 1)[0]
else:
col = get_random_colors(n_colors=1)
if isinstance(slf, str):
streamlines = self.atlas.parse_streamline(filepath=slf, color=col, *args, **kwargs)
else:
streamlines = self.atlas.parse_streamline(data= slf, color=col, *args, **kwargs)
self.actors['tracts'].extend(streamlines)
else:
raise ValueError("unrecognized argument sl_file: {}".format(sl_file))
else:
if not isinstance(sl_file, (str, pd.DataFrame)):
raise ValueError("unrecognized argument sl_file: {}".format(sl_file))
if not color_each:
if isinstance(sl_file, str):
streamlines = self.atlas.parse_streamline(filepath=sl_file, *args, **kwargs)
else:
streamlines = self.atlas.parse_streamline(data=sl_file, *args, **kwargs)
else:
if color is not None:
col = get_n_shades_of(color, 1)[0]
else:
col = get_random_colors(n_colors=1)
if isinstance(sl_file, str):
streamlines = self.atlas.parse_streamline(filepath=sl_file, color=col, *args, **kwargs)
else:
streamlines = self.atlas.parse_streamline(data=sl_file, color=col, *args, **kwargs)
self.actors['tracts'].extend(streamlines)
return streamlines
@staticmethod
def add_injection_sites(self, experiments, color=None):
"""
Creates spheres at the location of injections with a volume proportional to the injected volume
:param experiments: list of dictionaries with tractography data
:param color: (Default value = None)
"""
# check arguments
if not isinstance(experiments, list):
raise ValueError("experiments must be a list")
if not isinstance(experiments[0], dict):
raise ValueError("experiments should be a list of dictionaries")
# check color
if color is None:
color = INJECTION_DEFAULT_COLOR
injection_sites = []
for exp in experiments:
injection_sites.append(shapes.Sphere(pos=(exp["injection_x"], exp["injection_y"], exp["injection_z"]),
r = INJECTION_VOLUME_SIZE*exp["injection_volume"]*3,
c=color
))
self.actors['injection_sites'].extend(injection_sites)
# ---------------------------------------------------------------------------- #
# STRUCTURE TREE INTERACTION #
# ---------------------------------------------------------------------------- #
# ------------------------- Get/Print structures sets ------------------------ #
def get_structures_sets(self):
"""
Get the Allen's structure sets.
"""
summary_structures = self.structure_tree.get_structures_by_set_id([167587189]) # main summary structures
summary_structures = [s for s in summary_structures if s["acronym"] not in self.excluded_regions]
self.structures = pd.DataFrame(summary_structures)
import unittest
from enda.timeseries import TimeSeries
import pandas as pd
import pytz
class TestTimeSeries(unittest.TestCase):
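# Tests for enda's TimeSeries helpers: collapsing a DatetimeIndex into contiguous periods and detecting missing/extra points.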
def test_collapse_dt_series_into_periods(self):
# periods is a list of (start, end) pairs.
periods = [
(pd.to_datetime('2018-01-01 00:15:00+01:00'), pd.to_datetime('2018-01-01 00:45:00+01:00')),
(pd.to_datetime('2018-01-01 10:15:00+01:00'), pd.to_datetime('2018-01-01 15:45:00+01:00')),
(pd.to_datetime('2018-01-01 20:15:00+01:00'), pd.to_datetime('2018-01-01 21:45:00+01:00')),
]
# expand periods to build a time-series with gaps
dti = pd.DatetimeIndex([])
for s, e in periods:
dti = dti.append(pd.date_range(s, e, freq="30min"))
self.assertEqual(2+12+4, dti.shape[0])
# now find periods in the time-series
# should work with 2 types of freq arguments
for freq in ["30min", pd.to_timedelta("30min")]:
computed_periods = TimeSeries.collapse_dt_series_into_periods(dti, freq)
self.assertEqual(len(computed_periods), len(periods))
for i in range(len(periods)):
self.assertEqual(computed_periods[i][0], periods[i][0])
self.assertEqual(computed_periods[i][1], periods[i][1])
def test_collapse_dt_series_into_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, freq="30min")
def test_collapse_dt_series_into_periods_3(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, "30min")
def test_find_missing_and_extra_periods_1(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:50:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00'),
pd.to_datetime('2018-01-01 02:00:00+01:00'),
pd.to_datetime('2018-01-01 02:20:00+01:00')
])
freq, missing_periods, extra_points = TimeSeries.find_missing_and_extra_periods(dti, expected_freq="15min")
self.assertEqual(len(missing_periods), 2) # (01:15:00 -> 01:45:00), (02:15:00 -> 02:15:00)
self.assertEqual(len(extra_points), 2) # [00:50:00, 02:20:00]
def test_find_missing_and_extra_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:50:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00'),
pd.to_datetime('2018-01-01 02:00:00+01:00'),
pd.to_datetime('2018-01-01 02:20:00+01:00')
])
# should work when we infer "expected_freq"
freq, missing_periods, extra_points = TimeSeries.find_missing_and_extra_periods(dti, expected_freq=None)
self.assertEqual(freq, pd.Timedelta("15min")) # inferred a 15min freq
self.assertEqual(len(missing_periods), 2) # (01:15:00 -> 01:45:00), (02:15:00 -> 02:15:00)
self.assertEqual(len(extra_points), 2) # [00:50:00, 02:20:00]
def test_find_missing_and_extra_periods_3(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01'),
pd.to_datetime('2018-01-02'),
pd.to_datetime('2018-01-03'),
pd.to_datetime('2018-01-03 12:00:00'),
pd.to_datetime('2018-01-04')
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
from collections import namedtuple
import pathlib
import timeit
import textwrap
import pytest
import hypothesis as hyp
import hypothesis.strategies as hyp_st
import hypothesis.extra.numpy as hyp_np
import numpy as np
import pandas as pd
from endaq.calc import psd, stats, utils
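# Property-based tests for endaq.calc.psd: Parseval scaling of the Welch PSD and jagged-bin aggregation modes.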
@hyp.given(
df=hyp_np.arrays(
dtype=np.float64,
shape=(200,),
elements=hyp_st.floats(
# leave at least half the bits of precision (52 / 2) in the
# mean-subtracted result
1,
1e26,
),
)
.map(
lambda array: (array - array.mean(keepdims=True))
# V this pushes the zero-mean'd values away from zero
* 2 ** (np.finfo(np.float64).minexp // 2)
)
.map(lambda array: pd.DataFrame(array, index=np.arange(len(array)) * 1e-1)),
)
def test_welch_parseval(df):
"""
Test to confirm that `scaling="parseval"` maintains consistency with the
time-domain RMS.
"""
df_psd = psd.welch(df, bin_width=1, scaling="parseval")
assert df_psd.to_numpy().sum() == pytest.approx(stats.rms(df.to_numpy()) ** 2)
@hyp.given(
psd_df=hyp_np.arrays(
dtype=np.float64,
shape=(20, 3),
elements=hyp_st.floats(0, 1e20),
).map(lambda array: pd.DataFrame(array, index=np.arange(len(array)) * 10)),
freq_splits=hyp_np.arrays(
dtype=np.float64,
shape=(8,),
elements=hyp_st.floats(0, 200, exclude_min=True),
unique=True,
).map(lambda array: np.sort(array)),
)
@pytest.mark.parametrize(
"agg1, agg2",
[
("mean", lambda x, axis=-1: np.nan_to_num(np.mean(x, axis=axis))),
("sum", np.sum),
],
)
@pytest.mark.filterwarnings("ignore:empty frequency bins:RuntimeWarning")
def test_to_jagged_modes(psd_df, freq_splits, agg1, agg2):
"""Test `to_jagged(..., mode='mean')` against the equivalent `mode=np.mean`."""
result1 = psd.to_jagged(psd_df, freq_splits, agg=agg1)
result2 = psd.to_jagged(psd_df, freq_splits, agg=agg2)
assert np.all(result1.index == result2.index)
np.testing.assert_allclose(
result1.to_numpy(),
result2.to_numpy(),
atol=psd_df.min().min() * 1e-7,
)
@pytest.mark.skip(
reason="timing test -> does not enforce functionality; takes too long"
)
def test_to_jagged_mode_times():
"""
Check that a situation exists where the histogram method is more
performant.
"""
setup = textwrap.dedent(
"""
from endaq.calc import psd
import numpy as np
import pandas as pd
n = 10 ** 4
axis = -1
psd_array = np.random.random((3, n))
f = np.arange(n) / 3
psd_df = pd.DataFrame(psd_array.T, index=f)
#freq_splits = np.logspace(0, np.log2(n), num=100, base=2)
freq_splits = f[1:-1]
"""
)
t_direct = timeit.timeit(
"psd.to_jagged(psd_df, freq_splits, agg=np.sum)",
setup=setup,
number=3,
)
t_hist = timeit.timeit(
"psd.to_jagged(psd_df, freq_splits, agg='sum')",
setup=setup,
number=3,
)
print(f"direct form time: {t_direct}")
print(f"histogram time: {t_hist}")
assert t_hist < t_direct
_TestStruct = namedtuple("_TestStruct", "psd_df, agg, expt_f, expt_array")
@pytest.mark.parametrize(
", ".join(_TestStruct._fields),
[
_TestStruct(
psd_df=pd.DataFrame([1, 0, 0, 0, 0, 0, 0, 0]),
agg="sum",
expt_f=[1, 2, 4, 8],
expt_array=[0, 0, 0, 0],
),
_TestStruct(
psd_df=pd.DataFrame([0, 1, 0, 0, 0, 0, 0, 0]),
agg="sum",
expt_f=[1, 2, 4, 8],
expt_array=[1, 0, 0, 0],
),
_TestStruct(
psd_df=pd.DataFrame([0, 0, 1, 0, 0, 0, 0, 0]),
import wandb
from wandb import data_types
import numpy as np
import pytest
import os
import sys
import datetime
from wandb.sdk.data_types._dtypes import *
class_labels = {1: "tree", 2: "car", 3: "road"}
test_folder = os.path.dirname(os.path.realpath(__file__))
im_path = os.path.join(test_folder, "..", "assets", "test.png")
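# Unit tests for wandb's type system (_dtypes): primitive, container, and media-aware types, plus Table column type enforcement.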
def test_none_type():
assert TypeRegistry.type_of(None) == NoneType()
assert TypeRegistry.type_of(None).assign(None) == NoneType()
assert TypeRegistry.type_of(None).assign(1) == InvalidType()
def test_string_type():
assert TypeRegistry.type_of("Hello") == StringType()
assert TypeRegistry.type_of("Hello").assign("World") == StringType()
assert TypeRegistry.type_of("Hello").assign(None) == InvalidType()
assert TypeRegistry.type_of("Hello").assign(1) == InvalidType()
def test_number_type():
assert TypeRegistry.type_of(1.2) == NumberType()
assert TypeRegistry.type_of(1.2).assign(1) == NumberType()
assert TypeRegistry.type_of(1.2).assign(None) == InvalidType()
assert TypeRegistry.type_of(1.2).assign("hi") == InvalidType()
def make_datetime():
return datetime.datetime(2000, 12, 1)
def make_date():
return datetime.date(2000, 12, 1)
def make_datetime64():
return np.datetime64("2000-12-01")
def test_timestamp_type():
assert TypeRegistry.type_of(make_datetime()) == TimestampType()
assert (
TypeRegistry.type_of(make_datetime())
.assign(make_date())
.assign(make_datetime64())
== TimestampType()
)
assert TypeRegistry.type_of(make_datetime()).assign(None) == InvalidType()
assert TypeRegistry.type_of(make_datetime()).assign(1) == InvalidType()
def test_boolean_type():
assert TypeRegistry.type_of(True) == BooleanType()
assert TypeRegistry.type_of(True).assign(False) == BooleanType()
assert TypeRegistry.type_of(True).assign(None) == InvalidType()
assert TypeRegistry.type_of(True).assign(1) == InvalidType()
def test_any_type():
assert AnyType() == AnyType().assign(1)
assert AnyType().assign(None) == InvalidType()
def test_never_type():
assert InvalidType().assign(1) == InvalidType()
assert InvalidType().assign("a") == InvalidType()
assert InvalidType().assign(True) == InvalidType()
assert InvalidType().assign(None) == InvalidType()
def test_unknown_type():
assert UnknownType().assign(1) == NumberType()
assert UnknownType().assign(None) == InvalidType()
def test_union_type():
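# A union accepts values matching any member type; Unknown members are narrowed by the first concrete assignment.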
wb_type = UnionType([float, str])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == InvalidType()
wb_type = UnionType([float, AnyType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == wb_type
wb_type = UnionType([float, UnknownType()])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == UnionType([float, StringType()])
assert wb_type.assign(None) == InvalidType()
wb_type = UnionType([float, OptionalType(UnknownType())])
assert wb_type.assign(None).assign(True) == UnionType(
[float, OptionalType(BooleanType())]
)
wb_type = UnionType([float, UnionType([str, UnknownType()])])
assert wb_type.assign(1) == wb_type
assert wb_type.assign("s") == wb_type
assert wb_type.assign(True) == UnionType([float, str, bool])
assert wb_type.assign(None) == InvalidType()
def test_const_type():
wb_type = ConstType(1)
assert wb_type.assign(1) == wb_type
assert wb_type.assign("a") == InvalidType()
assert wb_type.assign(2) == InvalidType()
def test_set_const_type():
wb_type = ConstType(set())
assert wb_type.assign(set()) == wb_type
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1}) == InvalidType()
assert wb_type.assign([]) == InvalidType()
wb_type = ConstType({1, 2, 3})
assert wb_type.assign(set()) == InvalidType()
assert wb_type.assign(None) == InvalidType()
assert wb_type.assign({1, 2, 3}) == wb_type
assert wb_type.assign([1, 2, 3]) == InvalidType()
def test_object_type():
wb_type = TypeRegistry.type_of(np.random.rand(30))
assert wb_type.assign(np.random.rand(30)) == wb_type
assert wb_type.assign(4) == InvalidType()
def test_list_type():
assert ListType(int).assign([]) == ListType(int, 0)
assert ListType(int).assign([1, 2, 3]) == ListType(int, 3)
assert ListType(int).assign([1, "a", 3]) == InvalidType()
def test_dict_type():
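# A TypedDictType requires the full key structure: subset and differently-keyed dicts are invalid.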
spec = {
"number": float,
"nested": {
"list_str": [str],
},
}
exact = {
"number": 1,
"nested": {
"list_str": ["hello", "world"],
},
}
subset = {"nested": {"list_str": ["hi"]}}
narrow = {"number": 1, "string": "hi"}
wb_type = TypeRegistry.type_of(exact)
assert wb_type.assign(exact) == wb_type
assert wb_type.assign(subset) == InvalidType()
assert wb_type.assign(narrow) == InvalidType()
spec = {
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
wb_type = TypedDictType(spec)
assert wb_type.assign({}) == wb_type
assert wb_type.assign({"optional_number": 1}) == wb_type
assert wb_type.assign({"optional_number": "1"}) == InvalidType()
assert wb_type.assign({"optional_unknown": "hi"}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(str),
}
)
assert wb_type.assign({"optional_unknown": None}) == TypedDictType(
{
"optional_number": OptionalType(float),
"optional_unknown": OptionalType(UnknownType()),
}
)
wb_type = TypedDictType({"unknown": UnknownType()})
assert wb_type.assign({}) == InvalidType()
assert wb_type.assign({"unknown": None}) == InvalidType()
assert wb_type.assign({"unknown": 1}) == TypedDictType(
{"unknown": float},
)
def test_nested_dict():
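# Python-type notation and explicit Type objects should expand to the same nested TypedDictType.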
notation_type = TypedDictType(
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [
[
{
"a": float,
"b": bool,
"c": str,
"d": UnknownType(),
"e": {},
"f": [],
"g": [[]],
}
]
],
}
)
expanded_type = TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(
ListType(
TypedDictType(
{
"a": NumberType(),
"b": BooleanType(),
"c": StringType(),
"d": UnknownType(),
"e": TypedDictType({}),
"f": ListType(),
"g": ListType(ListType()),
}
)
)
),
}
)
example = {
"a": 1,
"b": True,
"c": "StringType()",
"d": "hi",
"e": {},
"f": [1],
"g": [
[
{
"a": 2,
"b": False,
"c": "StringType()",
"d": 3,
"e": {},
"f": [],
"g": [[5]],
}
]
],
}
real_type = TypedDictType.from_obj(example)
assert notation_type == expanded_type
assert notation_type.assign(example) == real_type
def test_image_type():
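# Image types track box/mask layers, class ids, and score keys; assigning annotated images merges disjoint layer sets.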
wb_type = data_types._ImageFileType()
image_simple = data_types.Image(np.random.rand(10, 10))
wb_type_simple = data_types._ImageFileType.from_obj(image_simple)
image_annotated = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
},
)
wb_type_annotated = data_types._ImageFileType.from_obj(image_annotated)
image_annotated_differently = data_types.Image(
np.random.rand(10, 10),
boxes={
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
},
masks={
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth_2": {"path": im_path, "class_labels": class_labels},
},
)
assert wb_type.assign(image_simple) == wb_type_simple
assert wb_type.assign(image_annotated) == wb_type_annotated
# OK to assign Images with disjoint class set
assert wb_type_annotated.assign(image_simple) == wb_type_annotated
# Merge when disjoint
assert wb_type_annotated.assign(
image_annotated_differently
) == data_types._ImageFileType(
box_layers={"box_predictions": {1, 2, 3}, "box_ground_truth": {1, 2, 3}},
box_score_keys={"loss", "acc"},
mask_layers={
"mask_ground_truth_2": set(),
"mask_ground_truth": set(),
"mask_predictions": {1, 2, 3},
},
class_map={"1": "tree", "2": "car", "3": "road"},
)
def test_classes_type():
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
wb_class_type = (
wandb.wandb_sdk.data_types.helper_types.classes._ClassesIdType.from_obj(
wb_classes
)
)
assert wb_class_type.assign(1) == wb_class_type
assert wb_class_type.assign(0) == InvalidType()
def test_table_type():
table_1 = wandb.Table(columns=["col"], data=[[1]])
t1 = data_types._TableType.from_obj(table_1)
table_2 = wandb.Table(columns=["col"], data=[[1.3]])
table_3 = wandb.Table(columns=["col"], data=[["a"]])
assert t1.assign(table_2) == t1
assert t1.assign(table_3) == InvalidType()
def test_table_implicit_types():
table = wandb.Table(columns=["col"])
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
table = wandb.Table(columns=["col"], optional=False)
with pytest.raises(TypeError):
table.add_data(None)
table.add_data(1)
with pytest.raises(TypeError):
table.add_data("a")
def test_table_allow_mixed_types():
table = wandb.Table(columns=["col"], allow_mixed_types=True)
table.add_data(None)
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
table = wandb.Table(columns=["col"], optional=False, allow_mixed_types=True)
with pytest.raises(TypeError):
table.add_data(None) # Still errors since optional is false
table.add_data(1)
table.add_data("a") # No error with allow_mixed_types
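# Condensed sketch of the three column-typing modes exercised above
# (illustrative helper, not one of the original tests):
def _demo_table_typing_modes():
    inferred = wandb.Table(columns=["col"])  # dtype inferred from first non-None value
    inferred.add_data(None)  # columns are optional by default
    inferred.add_data(1)  # locks the column to numbers
    strict = wandb.Table(columns=["col"], optional=False)  # rejects None values
    strict.add_data(1)
    mixed = wandb.Table(columns=["col"], allow_mixed_types=True)  # accepts any type
    mixed.add_data("a")
    return inferred, strict, mixed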
def test_tables_with_dicts():
good_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
]
bad_data = [
[None],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
"e": wandb.Image(
np.random.randint(255, size=(10, 10))
),
}
]
],
}
]
}
],
[
{
"a": [
{
"b": 1,
"c": [
[
{
"d": 1,
}
]
],
}
]
}
],
]
table = wandb.Table(columns=["A"], data=good_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=bad_data, allow_mixed_types=True)
table = wandb.Table(columns=["A"], data=good_data)
with pytest.raises(TypeError):
table = wandb.Table(columns=["A"], data=bad_data)
def test_table_explicit_types():
table = wandb.Table(columns=["a", "b"], dtype=int)
table.add_data(None, None)
table.add_data(1, 2)
with pytest.raises(TypeError):
table.add_data(1, "a")
table = wandb.Table(columns=["a", "b"], optional=False, dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
table = wandb.Table(columns=["a", "b"], optional=[False, True], dtype=[int, str])
with pytest.raises(TypeError):
table.add_data(None, None)
with pytest.raises(TypeError):
table.add_data(None, "a")
table.add_data(1, None)
table.add_data(1, "a")
with pytest.raises(TypeError):
table.add_data("a", "a")
def test_table_type_cast():
table = wandb.Table(columns=["type_col"])
table.add_data(1)
wb_classes = data_types.Classes(
[
{"id": 1, "name": "cat"},
{"id": 2, "name": "dog"},
{"id": 3, "name": "horse"},
]
)
table.cast("type_col", wb_classes.get_type())
table.add_data(2)
with pytest.raises(TypeError):
table.add_data(4)
box_annotation = {
"box_predictions": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
"box_ground_truth": {
"box_data": [
{
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4,
},
"class_id": 1,
"box_caption": "minMax(pixel)",
"scores": {"acc": 0.1, "loss": 1.2},
},
],
"class_labels": class_labels,
},
}
mask_annotation = {
"mask_predictions": {
"mask_data": np.random.randint(0, 4, size=(30, 30)),
"class_labels": class_labels,
},
"mask_ground_truth": {"path": im_path, "class_labels": class_labels},
}
def test_table_specials():
table = wandb.Table(
columns=["image", "table"],
optional=False,
dtype=[data_types.Image, data_types.Table],
)
with pytest.raises(TypeError):
table.add_data(None, None)
# Infers specific types from first valid row
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
"hello",
data_types.Table(data=[[1, True, None]]),
)
# Denies conflict
with pytest.raises(TypeError):
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, "True", None]]),
)
# allows further refinement
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
# allows addition
table.add_data(
data_types.Image(
np.random.rand(10, 10),
boxes=box_annotation,
masks=mask_annotation,
),
data_types.Table(data=[[1, True, 1]]),
)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_nan_non_float():
import pandas as pd
wandb.Table(dataframe=pd.DataFrame(data=[["A"], [np.nan]], columns=["a"]))
def test_table_typing_numpy():
# Pulled from https://numpy.org/devdocs/user/basics.types.html
# Numerics
table = wandb.Table(columns=["A"], dtype=[NumberType])
table.add_data(None)
table.add_data(42)
table.add_data(np.byte(1))
table.add_data(np.short(42))
table.add_data(np.ushort(42))
table.add_data(np.intc(42))
table.add_data(np.uintc(42))
table.add_data(np.int_(42))
table.add_data(np.uint(42))
table.add_data(np.longlong(42))
table.add_data(np.ulonglong(42))
table.add_data(np.half(42))
table.add_data(np.float16(42))
table.add_data(np.single(42))
table.add_data(np.double(42))
table.add_data(np.longdouble(42))
table.add_data(np.csingle(42))
table.add_data(np.cdouble(42))
table.add_data(np.clongdouble(42))
table.add_data(np.int8(42))
table.add_data(np.int16(42))
table.add_data(np.int32(42))
table.add_data(np.int64(42))
table.add_data(np.uint8(42))
table.add_data(np.uint16(42))
table.add_data(np.uint32(42))
table.add_data(np.uint64(42))
table.add_data(np.intp(42))
table.add_data(np.uintp(42))
table.add_data(np.float32(42))
table.add_data(np.float64(42))
table.add_data(np.float_(42))
table.add_data(np.complex64(42))
table.add_data(np.complex128(42))
table.add_data(np.complex_(42))
# Booleans
table = wandb.Table(columns=["A"], dtype=[BooleanType])
table.add_data(None)
table.add_data(True)
table.add_data(False)
table.add_data(np.bool_(True))
# Array of Numerics
table = wandb.Table(columns=["A"], dtype=[[NumberType]])
table.add_data(None)
table.add_data([42])
table.add_data(np.array([1, 0], dtype=np.byte))
table.add_data(np.array([42, 42], dtype=np.short))
table.add_data(np.array([42, 42], dtype=np.ushort))
table.add_data(np.array([42, 42], dtype=np.intc))
table.add_data(np.array([42, 42], dtype=np.uintc))
table.add_data(np.array([42, 42], dtype=np.int_))
table.add_data(np.array([42, 42], dtype=np.uint))
table.add_data(np.array([42, 42], dtype=np.longlong))
table.add_data(np.array([42, 42], dtype=np.ulonglong))
table.add_data(np.array([42, 42], dtype=np.half))
table.add_data(np.array([42, 42], dtype=np.float16))
table.add_data(np.array([42, 42], dtype=np.single))
table.add_data(np.array([42, 42], dtype=np.double))
table.add_data(np.array([42, 42], dtype=np.longdouble))
table.add_data(np.array([42, 42], dtype=np.csingle))
table.add_data(np.array([42, 42], dtype=np.cdouble))
table.add_data(np.array([42, 42], dtype=np.clongdouble))
table.add_data(np.array([42, 42], dtype=np.int8))
table.add_data(np.array([42, 42], dtype=np.int16))
table.add_data(np.array([42, 42], dtype=np.int32))
table.add_data(np.array([42, 42], dtype=np.int64))
table.add_data(np.array([42, 42], dtype=np.uint8))
table.add_data(np.array([42, 42], dtype=np.uint16))
table.add_data(np.array([42, 42], dtype=np.uint32))
table.add_data(np.array([42, 42], dtype=np.uint64))
table.add_data(np.array([42, 42], dtype=np.intp))
table.add_data(np.array([42, 42], dtype=np.uintp))
table.add_data(np.array([42, 42], dtype=np.float32))
table.add_data(np.array([42, 42], dtype=np.float64))
table.add_data(np.array([42, 42], dtype=np.float_))
table.add_data(np.array([42, 42], dtype=np.complex64))
table.add_data(np.array([42, 42], dtype=np.complex128))
table.add_data(np.array([42, 42], dtype=np.complex_))
# Array of Booleans
table = wandb.Table(columns=["A"], dtype=[[BooleanType]])
table.add_data(None)
table.add_data([True])
table.add_data([False])
table.add_data(np.array([True, False], dtype=np.bool_))
# Nested arrays
table = wandb.Table(columns=["A"])
table.add_data([[[[1, 2, 3]]]])
table.add_data(np.array([[[[1, 2, 3]]]]))
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_typing_pandas():
import pandas as pd
# TODO: Pandas https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#basics-dtypes
# Numerics
table = wandb.Table(dataframe=pd.DataFrame([[1], [0]]).astype(np.byte))
table.add_data(1)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.short))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ushort))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.intc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uintc))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.int_))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.uint))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.longlong))
table.add_data(42)
table = wandb.Table(dataframe=pd.DataFrame([[42], [42]]).astype(np.ulonglong))
table.add_data(42)
table = wandb.Table(dataframe= | pd.DataFrame([[42], [42]]) | pandas.DataFrame |
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
    np.dtype or DatetimeTZDtype
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
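# Illustrative sketch (hypothetical helper, not part of the original module):
# how tz_to_dtype maps a timezone to the dtype carried by DatetimeArray.
def _demo_tz_to_dtype():
    import pytz

    # tz-naive data keeps the plain numpy datetime64[ns] dtype
    assert tz_to_dtype(None) == DT64NS_DTYPE
    # tz-aware data gets a DatetimeTZDtype that remembers the zone
    assert tz_to_dtype(pytz.UTC) == DatetimeTZDtype(tz=pytz.UTC)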
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
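# Minimal sketch of the property-factory pattern used above (illustrative
# only; `_toy_field_accessor` and `_ToyFields` are hypothetical names, not
# pandas API). Each generated property looks up one named field on access.
def _toy_field_accessor(name: str):
    def f(self):
        return self._fields[name]

    f.__name__ = name
    return property(f)


class _ToyFields:
    _fields = {"year": 2020, "month": 1}
    year = _toy_field_accessor("year")
    month = _toy_field_accessor("month")


# _ToyFields().year -> 2020, _ToyFields().month -> 1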
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
            # tz info from start and end to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.astype("M8[ns]", copy=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(index) and index[0] == start:
index = index[1:]
if not right_inclusive and len(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if isinstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif is_datetime64_ns_dtype(dtype):
return astype_dt64_to_dt64tz(self, dtype, copy, via_utc=False)
elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
# unit conversion e.g. datetime64[s]
return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
        # compare self.tz against the timezone (if any) attached to `other`
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
def _add_offset(self, offset) -> DatetimeArray:
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> np.ndarray:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
        nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray[object]
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases, when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
@dtl.ravel_compat
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "day_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time")
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="date")
def isocalendar(self) -> DataFrame:
"""
Returns a DataFrame with the year, week, and day calculated according to
the ISO 8601 standard.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
with columns year, week and day
See Also
--------
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
2019-12-30 2020 1 1
2019-12-31 2020 1 2
2020-01-01 2020 1 3
>>> idx.isocalendar().week
2019-12-29 52
2019-12-30 1
2019-12-31 1
2020-01-01 1
Freq: D, Name: week, dtype: UInt32
"""
from pandas import DataFrame
values = self._local_timestamps()
sarray = fields.build_isocalendar_sarray(values)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
if self._hasnans:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
@property
def weekofyear(self):
"""
The week ordinal of the year.
.. deprecated:: 1.1.0
weekofyear and week have been deprecated.
Please use DatetimeIndex.isocalendar().week instead.
"""
warnings.warn(
"weekofyear and week have been deprecated, please use "
"DatetimeIndex.isocalendar().week instead, which returns "
"a Series. To exactly reproduce the behavior of week and "
"weekofyear and return an Index, you may call "
"pd.Int64Index(idx.isocalendar().week)",
FutureWarning,
stacklevel=3,
)
week_series = self.isocalendar().week
if week_series.hasnans:
return week_series.to_numpy(dtype="float64", na_value=np.nan)
return week_series.to_numpy(dtype="int64")
week = weekofyear
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="Y")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int64
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="M")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int64
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int64
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int64
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="T")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int64
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int64
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int64
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int64
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int64
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
        is_quarter_end : Similar property for indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self) -> np.ndarray:
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (
day
+ np.fix((153 * month - 457) / 5)
+ 365 * year
+ np.floor(year / 4)
- np.floor(year / 100)
+ np.floor(year / 400)
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60
+ self.second / 3600
+ self.microsecond / 3600 / 10 ** 6
+ self.nanosecond / 3600 / 10 ** 9
)
/ 24
)
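# Worked example (illustrative only): for 2000-01-01 12:00 the formula above takes
# month<3 -> year=1999, month=13, so
# day(1) + fix((153*13-457)/5)=306 + 365*1999 + floor(1999/4) - floor(1999/100)
# + floor(1999/400) + 1721118.5 = 2451544.5, and the 12h time-of-day term adds 0.5,
# giving the expected Julian date 2451545.0 (the J2000 epoch).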
# -----------------------------------------------------------------
# Reductions
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
tda = TimedeltaArray(self._ndarray.view("i8"))
return tda.std(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna
)
# -------------------------------------------------------------------
# Constructor Helpers
@overload
def sequence_to_datetimes(
data, allow_object: Literal[False] = ..., require_iso8601: bool = ...
) -> DatetimeArray:
...
@overload
def sequence_to_datetimes(
data, allow_object: Literal[True] = ..., require_iso8601: bool = ...
) -> np.ndarray | DatetimeArray:
...
def sequence_to_datetimes(
data, allow_object: bool = False, require_iso8601: bool = False
) -> np.ndarray | DatetimeArray:
"""
Parse/convert the passed data to either DatetimeArray or np.ndarray[object].
"""
result, tz, freq = sequence_to_dt64ns(
data,
allow_object=allow_object,
allow_mixed=True,
require_iso8601=require_iso8601,
)
if result.dtype == object:
return result
dtype = tz_to_dtype(tz)
dta = DatetimeArray._simple_new(result, freq=freq, dtype=dtype)
return dta
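# Illustrative usage note for the helper above: a list of ISO strings such as
# ["2021-01-01", "2021-01-02"] comes back as a DatetimeArray, while with
# allow_object=True input that mixes timezones is returned as an object-dtype
# ndarray instead of raising (see the allow_object description in
# sequence_to_dt64ns below).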
def sequence_to_dt64ns(
data,
dtype=None,
copy=False,
tz=None,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
*,
allow_object: bool = False,
allow_mixed: bool = False,
require_iso8601: bool = False,
):
"""
Parameters
----------
data : list-like
dtype : dtype, str, or None, default None
copy : bool, default False
tz : tzinfo, str, or None, default None
dayfirst : bool, default False
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
allow_object : bool, default False
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
allow_mixed : bool, default False
Interpret integers as timestamps when datetime objects are also present.
require_iso8601 : bool, default False
Only consider ISO-8601 formats when parsing strings.
Returns
-------
result : numpy.ndarray
The sequence converted to a numpy array with dtype ``datetime64[ns]``.
tz : tzinfo or None
Either the user-provided tzinfo or one inferred from the data.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
TypeError : PeriodDType data is passed
"""
inferred_freq = None
dtype = _validate_dt64_dtype(dtype)
tz = timezones.maybe_get_tz(tz)
# if dtype has an embedded tz, capture it
tz = validate_tz_from_dtype(dtype, tz)
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.asarray(data)
copy = False
elif isinstance(data, ABCMultiIndex):
raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
else:
data = | extract_array(data, extract_numpy=True) | pandas.core.construction.extract_array |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True, categories=False, auxcats=False):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list, df, or NumPy array.
n_in: (int) Number of lag observations as input (X).
n_out: (int) Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
categories: (list) Categories to keep
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
df = | pd.DataFrame(data) | pandas.DataFrame |
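# Illustrative sketch of the lag/lead framing the docstring above describes (the
# original implementation is truncated here); the "var1(t-1)"-style column names
# are an assumed convention, and pandas' shift() does the actual work.
def _series_to_supervised_sketch(values, n_in=1, n_out=1, dropnan=True):
    s = pd.Series(list(values), name="var1")
    lags = {f"var1(t-{i})": s.shift(i) for i in range(n_in, 0, -1)}
    leads = {(f"var1(t+{i})" if i else "var1(t)"): s.shift(-i) for i in range(n_out)}
    frame = pd.concat([pd.DataFrame(lags), pd.DataFrame(leads)], axis=1)
    return frame.dropna() if dropnan else frame
# e.g. _series_to_supervised_sketch(range(5), n_in=2) keeps the rows with columns
# var1(t-2), var1(t-1), var1(t)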
import pandas as pd
from tqdm import trange
import os, sys
def download(refresh=False):
# requirements
import requests, zipfile, io, os
# we can override re-downloading to the data folder if we want
if not refresh:
return
print('Scraping for all downloads. . .')
# scrape thge website for all available files
from lxml import html
import requests
# get the page
page = requests.get('https://data.sa.gov.au/data/dataset/adelaide-metrocard-validations')
# extract into tree
tree = html.fromstring(page.content)
# get the urls
urls = tree.xpath('//a[@target="_blank"]/@href')
# extract only urls that are datasets
urls = list(filter(lambda url: url[:36] == 'https://data.sa.gov.au/data/dataset/', urls))
# extract only the links that have string '/download/bandedvalidations20' in them
urls = list(filter(lambda url: '/download/bandedvalidations20' in url, urls))
# download and extract the urls
print('Downloading files from found download urls!')
for i in trange(len(urls)):
# get zip
r = requests.get(urls[i])
# if the url gets a .zip, extract the file
if urls[i][-4:] == '.zip':
# encode as zipfile
z = zipfile.ZipFile(io.BytesIO(r.content))
# extract
z.extractall('data')
# if the url gets a .csv, save it
elif urls[i][-4:] == '.csv':
# write with the last 34 characters as the file name
with open(os.path.join('data', urls[i][-34:]), 'w') as f:
f.write(r.text)
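# Typical call order for this script (illustrative; the file names depend on what
# the scrape finds): download(refresh=True) to populate ./data, then df = load()
# or df = load(nrows=100000) for a quicker partial read.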
def load(csvs=None, nrows=None):
# construct paths for data files if not supplied, platform independent
if not csvs:
csvs = [os.path.join('data', csv) for csv in os.listdir('data')]
# attempt to remove any hidden files
csvs = list(filter(lambda csv: csv[0] != '.', csvs))
# remove too short candidate csv files
csvs = list(filter(lambda csv: len(csv) > 4, csvs))
# remove non csv file suffixes
csvs = list(filter(lambda csv: csv[-4:] == '.csv', csvs))
# if there is no data exit early
if len(csvs) < 1: sys.exit('No data in data folder!')
print('Loading all data into DataFrame. . .')
# platform independent read all csv in 'data' folder
df = pd.concat([ | pd.read_csv(csv, nrows=nrows) | pandas.read_csv |
# %% Imports
import os
import glob
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import seaborn as sns
from sklearn.linear_model import LinearRegression
from scipy.optimize import least_squares
from ruamel_yaml import YAML
original_np_seterr = np.seterr(all='raise') # raise exceptions rather than warnings
# %% The initial set of countries that we're interested in
countries = ["Australia", "Austria", "Belgium", "Brazil", "Canada", "Chile", "China", "Czechia", "Denmark", "Ecuador", "Finland", "France", "Germany", "Greece", "Iceland", "Indonesia", "Iran", "Ireland", "Israel", "Italy", "Japan", "Luxembourg", "Malaysia", "Netherlands", "Norway", "Pakistan", "Poland", "Portugal", "Saudi Arabia", "South Korea", "Spain", "Sweden", "Switzerland", "Thailand", "Turkey", "United States", "United Kingdom"]
use_all_countries = False # if set to 'True', reset 'countries' to all countries found in the data, after the data is loaded
us_states = ["California", "New York", "Texas", "Arizona", "Florida", "Washington"]; # cherrypick these states from the NY Times US Data Set
ca_provinces = ["Ontario", "Quebec"] # cherrypick these provinces from the JHU Data Set
statuses = ['confirmed', 'deaths'] # leave out 'recovered' for now since they are less informative and make the plots confusing
# %% Load the data from the external repository
ts_global = {
'confirmed': pd.read_csv("https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", header=0, index_col=1),
'deaths': pd.read_csv("https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv", header=0, index_col=1),
'recovered': pd.read_csv("https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv", header=0, index_col=1),
}
ts_nytimes = | pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv", header=0, index_col=0) | pandas.read_csv |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
def discriminated_antis(all_antis):
try:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv')
except:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv', encoding='gbk')
def isanti(x):
df_抗菌药物['药品'] = x.抗菌药物
df1 = df_抗菌药物[df_抗菌药物['规则等级']==1]
if x.抗菌药物 in list(df1['匹配规则'].values):
return df1[df1['匹配规则']==x.抗菌药物].reset_index(drop=True).loc[0]['抗菌药物通用名']
else:
df2 = df_抗菌药物[df_抗菌药物['规则等级']==2]
df2['是否匹配'] = df2.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df2['匹配长度'] = df2.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df2[~df2['是否匹配'].isnull()].shape[0]==0:
df3 = df_抗菌药物[df_抗菌药物['规则等级']==3]
df3['是否匹配'] = df3.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df3['匹配长度'] = df3.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df3[~df3['是否匹配'].isnull()].shape[0]==0:
df4 = df_抗菌药物[df_抗菌药物['规则等级']==4]
df4['是否匹配'] = df4.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df4['匹配长度'] = df4.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df4[~df4['是否匹配'].isnull()].shape[0]==0:
return np.nan
else:
return df4[~df4['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name whose regex rule matched with the longest pattern
else:
return df3[~df3['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name whose regex rule matched with the longest pattern
else:
return df2[~df2['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name whose regex rule matched with the longest pattern
all_antis['抗菌药物通用名'] = all_antis.apply(isanti, axis=1)
return all_antis
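# Minimal sketch of the "longest successful regex match wins" cascade used above
# (illustrative only: the tiny rule table in the example call is made up, the real
# dictionary is loaded from 抗菌药物字典.csv, and the module-level re/pandas/numpy
# imports are reused).
def _match_generic_name_sketch(drug_name, rules):
    hits = rules[rules['匹配规则'].apply(lambda pat: re.match(pat, drug_name) is not None)].copy()
    if hits.empty:
        return np.nan
    hits['匹配长度'] = hits['匹配规则'].str.len()
    return hits.sort_values('匹配长度', ascending=False)['抗菌药物通用名'].iloc[0]
# _match_generic_name_sketch('头孢唑林钠',
#     pd.DataFrame({'匹配规则': ['头孢', '头孢唑林'], '抗菌药物通用名': ['头孢类', '头孢唑林']}))
# -> '头孢唑林' (the longer, more specific rule beats the generic '头孢' one)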
# ----------------------------------------------------------------------------------------------------- Level-1 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for level-1 figure 1 (antibiotics / bacteria detection / drug susceptibility)
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
# Issue categories, problem-record counts, and overall record counts
bus_dic = {
'给药': "select '给药' as 业务类型 ,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where BEGINTIME is not null group by substr(BEGINTIME,1,7)",
'菌检出': " select '菌检出' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
'药敏': " select '药敏' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
}
for bus in bus_dic:
res_数据时间缺失及汇总 = res_数据时间缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
print('抗菌药物-菌检出-药敏一级图一',bus)
return res_数据时间缺失及汇总
# Update level-1 figure 1 (antibiotics / bacteria detection / drug susceptibility)
@app.callback(
Output('anti_bar_drug_first_level_first_fig','figure'),
Output('anti_bar_drug_first_level_first_fig_data','data'),
Input('anti_bar_drug_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(anti_bar_drug_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
if anti_bar_drug_first_level_first_fig_data is None:
anti_bar_drug_first_level_first_fig_data = {}
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data['btime'] = btime
anti_bar_drug_first_level_first_fig_data['etime'] = etime
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig_data = json.loads(anti_bar_drug_first_level_first_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_first_fig_data['hosname']:
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig = pd.read_json(anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'], orient='split')
anti_bar_drug_first_level_first_fig_data = dash.no_update
#
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig[(anti_bar_drug_first_level_first_fig['month']>=btime) & (anti_bar_drug_first_level_first_fig['month']<=etime)]
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(anti_bar_drug_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
# Use a horizontal legend and set its position
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,anti_bar_drug_first_level_first_fig_data
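# The callback above (and the ones below) all follow the same cache-in-dcc.Store
# pattern: keep the queried DataFrame as JSON in the Store and only hit the
# database again when the hospital or the counting window changes. A condensed
# sketch of that pattern (function and key names here are illustrative, not the
# app's real component ids):
def _cached_fetch_sketch(store_json, key, fetch_fn, engine, btime, etime, hosname):
    cache = json.loads(store_json) if store_json else {}
    stale = (not cache or cache.get('hosname') != hosname
             or cache.get('btime') != btime or cache.get('etime') != etime)
    if stale:
        df = fetch_fn(engine, btime, etime)
        cache = {key: df.to_json(orient='split', date_format='iso'),
                 'hosname': hosname, 'btime': btime, 'etime': etime}
        return df, json.dumps(cache)
    return pd.read_json(cache[key], orient='split'), dash.no_update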
# # ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data for level-1 figure 2 (antibiotics / bacteria detection / drug susceptibility)
def get_first_lev_second_fig_date(engine,btime,etime):
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '科室', '科室名称', 'num'])
bus_dic = {'8种耐药菌检出': f""" select '8种耐药菌检出' as 业务类型, t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
"限制级特殊级抗菌药物使用" : f"""select '限制级特殊级抗菌药物使用' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from ANTIBIOTICS where ALEVEL in ('限制类', '特殊类')
and substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
'药敏结果为耐药': f""" select '药敏结果为耐药' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from DRUGSUSCEPTIBILITY where SUSCEPTIBILITY like '%耐药%'
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
"""
}
for bus in bus_dic:
temp = pd.read_sql(bus_dic[bus],con=engine)
temp = temp[0:8]
res_数据关键字缺失及汇总 = res_数据关键字缺失及汇总.append(temp)
return res_数据关键字缺失及汇总
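# Note on the SQL above: the `t1.dept=t2.code(+)` predicate is Oracle's legacy
# left-outer-join syntax (keep every dept even without a matching s_departments
# row), and `temp[0:8]` keeps only the eight busiest departments per category
# because each query is already ordered by num desc.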
# Update level-1 figure 2
@app.callback(
Output('anti_bar_drug_first_level_second_fig','figure'),
Output('anti_bar_drug_first_level_second_fig_data','data'),
# Output('rank_month_choice','min'),
# Output('rank_month_choice','max'),
# Output('rank_month_choice','value'),
# Output('rank_month_choice','marks'),
Input('anti_bar_drug_first_level_second_fig_data','data'),
# Input('rank_month_choice','value'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# Input('rank_month_choice','marks'),
# prevent_initial_call=True
)
# def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,rank_month_choice,db_con_url,count_time,marks):
def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,db_con_url,count_time):
# def unixTimeMillis(dt):
# return int(time.mktime(dt.timetuple()))
#
# def unixToDatetime(unix):
# return pd.to_datetime(unix, unit='s')
#
# def getMarks(start, end, Nth=100):
# result = {}
# for i, date in enumerate(daterange):
# result[unixTimeMillis(date)] = str(date.strftime('%Y-%m'))
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
min = dash.no_update
max = dash.no_update
value = dash.no_update
marks = dash.no_update
if anti_bar_drug_first_level_second_fig_data is None:
anti_bar_drug_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime+'-01', periods=((end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# marks = getMarks(daterange.min(), daterange.max())
else:
anti_bar_drug_first_level_second_fig_data = json.loads(anti_bar_drug_first_level_second_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps( anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime + '-01', periods=( (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# print(value)
# marks = getMarks(daterange.min(), daterange.max())
else:
if anti_bar_drug_first_level_second_fig_data['btime'] != btime or anti_bar_drug_first_level_second_fig_data['etime'] != etime:
# if rank_month_choice is not None and len(rank_month_choice)>0:
# print(rank_month_choice)
# btime1 = time.gmtime(rank_month_choice[0])
# etime1 = time.gmtime(rank_month_choice[1])
# btime = f"{btime1.tm_year}-0{btime1.tm_mon}" if btime1.tm_mon<10 else f"{btime1.tm_year}-{btime1.tm_mon}"
# etime = f"{etime1.tm_year}-0{etime1.tm_mon}" if etime1.tm_mon<10 else f"{etime1.tm_year}-{etime1.tm_mon}"
# print(btime,etime)
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
anti_bar_drug_first_level_second_fig_data = dash.no_update
# print("一级第二张图数据:")
# print(rank_month_choice)
# print(marks)
bar = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='8种耐药菌检出']
anti = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='限制级特殊级抗菌药物使用']
drug = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='药敏结果为耐药']
bar = bar.sort_values(['num'], ascending=True)
anti = anti.sort_values(['num'], ascending=True)
drug = drug.sort_values(['num'], ascending=True)
fig = make_subplots(rows=1,cols=3)
fig.add_trace(
go.Bar(x=anti['num'], y=anti['科室名称'], orientation='h', name='给药', marker_color=px.colors.qualitative.Dark24[0]),
row=1, col=1
)
fig.add_trace(
go.Bar(x=drug['num'], y=drug['科室名称'], orientation='h', name='药敏',
marker_color=px.colors.qualitative.Dark24[1]),
row=1, col=2,
)
fig.add_trace(
go.Bar(x=bar['num'],y=bar['科室名称'],orientation='h',name='菌检出', marker_color=px.colors.qualitative.Dark24[2]),
row=1,col=3
)
# Use a horizontal legend and set its position
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
return fig,anti_bar_drug_first_level_second_fig_data
# return fig,anti_bar_drug_first_level_second_fig_data,min ,max ,value ,marks
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the first level-2 antibiotics figure
def get_second_lev_first_fig_date(engine,btime,etime):
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
bus_dic = {'用药目的': f" select '用药目的缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'药物等级': f" select '药物等级缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'医嘱开始时间大于结束时间': f" select '医嘱开始时间大于结束时间' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime group by substr(BEGINTIME,1,7) ",
'医嘱时间在出入院时间之外' : f""" select '医嘱时间在出入院时间之外' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
group by substr(BEGINTIME,1,7)
""",
}
for bus in bus_dic:
res_数据科室信息缺失及汇总 = res_数据科室信息缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据科室信息缺失及汇总
# Update level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig','figure'),
Output('anti_second_level_first_fig_data','data'),
Input('anti_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(anti_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_first_fig_data is None:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data={}
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split', date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig_data = json.loads(anti_second_level_first_fig_data)
if db_con_url['hosname'] != anti_second_level_first_fig_data['hosname']:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
if anti_second_level_first_fig_data['btime'] != btime or anti_second_level_first_fig_data['etime'] != etime:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig = pd.read_json(anti_second_level_first_fig_data['anti_second_level_first_fig'], orient='split')
anti_second_level_first_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
bus_opts = anti_second_level_first_fig[['业务类型']].drop_duplicates().reset_index(drop=True)
# res_数据科室信息缺失及汇总 = anti_second_level_first_fig.sort_values(['month','业务类型'])
print(anti_second_level_first_fig)
for tem,bus in bus_opts.iterrows():
print(tem,)
print(bus,)
temp = anti_second_level_first_fig[anti_second_level_first_fig['业务类型']==bus['业务类型']]
print(temp)
temp = temp.sort_values(['month'])
if temp.shape[0]>0:
fig_概览一级_科室映射缺失.add_trace(
go.Scatter(x=temp['month'], y=temp['num'], name=bus['业务类型'] ,marker_color=px.colors.qualitative.Dark24[tem] )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="问题数量")
fig_概览一级_科室映射缺失.update_xaxes(title_text="月份")
return fig_概览一级_科室映射缺失,anti_second_level_first_fig_data
# Download the detail records behind level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig_date_detail', 'data'),
Input('anti_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'用药目的缺失': f" select * from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') ",
'药物等级缺失': f" select t1.* from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
'医嘱开始时间大于结束时间': f" select t1.* from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime ",
'医嘱时间在出入院时间之外': f""" select t1.* from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7)
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key], con=engine)
if temp.shape[0] > 0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'], columns=[key])
error_df.to_excel(writer, sheet_name=key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}抗菌药物问题数据明细.xlsx')
else:
return dash.no_update
# # ----------------------------------------------------------------------------------------------------- Level-2 figure 2 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the second level-2 antibiotics figure
def get_second_level_second_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.read_sql(f" select ANAME as 抗菌药物,count(1) as num , substr(BEGINTIME,1,7) as 月份 from antibiotics where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' group by substr(BEGINTIME,1,7),ANAME ",con=engine)
return res_业务逻辑问题数据汇总
# Update the level-2 figure
@app.callback(
Output('anti_second_level_second_fig','figure'),
Output('anti_second_level_second_fig_data','data'),
Input('anti_second_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_second_level_fig(anti_second_level_second_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_second_fig_data is None:
anti_second_level_second_fig_data = {}
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split', date_format='iso')
anti_second_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
anti_second_level_second_fig_data = json.loads(anti_second_level_second_fig_data)
if db_con_url['hosname'] != anti_second_level_second_fig_data['hosname']:
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split',date_format='iso')
anti_second_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
if anti_second_level_second_fig_data['btime'] != btime or anti_second_level_second_fig_data['etime'] != etime:
anti_second_level_second_fig = get_second_level_second_fig_date(engine, btime, etime)
anti_second_level_second_fig_data['anti_second_level_second_fig'] = anti_second_level_second_fig.to_json(orient='split',date_format='iso')
anti_second_level_second_fig_data['btime'] = btime
anti_second_level_second_fig_data['etime'] = etime
anti_second_level_second_fig_data = json.dumps(anti_second_level_second_fig_data)
else:
anti_second_level_second_fig = pd.read_json(anti_second_level_second_fig_data['anti_second_level_second_fig'], orient='split')
anti_second_level_second_fig_data = dash.no_update
antis_dict = discriminated_antis(anti_second_level_second_fig[['抗菌药物']].drop_duplicates())
anti_second_level_second_fig = anti_second_level_second_fig.merge(antis_dict,on='抗菌药物',how='left')
anti_second_level_second_fig['抗菌药物通用名'] = np.where(anti_second_level_second_fig['抗菌药物通用名'].isnull(),anti_second_level_second_fig['抗菌药物'],anti_second_level_second_fig['抗菌药物通用名'])
anti_second_level_second_fig = anti_second_level_second_fig.sort_values(['月份'])
fig = px.bar(anti_second_level_second_fig, x="月份", y="num", color='抗菌药物通用名' ,color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig.update_yaxes(title_text="医嘱数量", )
fig.update_xaxes(title_text="月份", )
return fig,anti_second_level_second_fig_data
# ----------------------------------------------------------------------------------------------------- Level-2 figure 3 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the third level-2 antibiotics figure
def get_second_level_third_fig_date(engine,btime,etime):
res_业务逻辑问题数据汇总 = pd.read_sql(
f" select ALEVEL as 抗菌药物等级,count(1) as num , substr(BEGINTIME,1,7) as 月份 from antibiotics where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ALEVEL is not null group by substr(BEGINTIME,1,7),ALEVEL ",
con=engine)
return res_业务逻辑问题数据汇总
# Update the first level-3 figure
@app.callback(
Output('anti_second_level_third_fig','figure'),
Output('anti_second_level_third_fig_data', 'data'),
Input('anti_second_level_third_fig_data', 'data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
)
def update_third_level_first_fig(anti_second_level_third_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_third_fig_data is None:
anti_second_level_third_fig_data = {}
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json( orient='split', date_format='iso')
anti_second_level_third_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
anti_second_level_third_fig_data = json.loads(anti_second_level_third_fig_data)
if db_con_url['hosname'] != anti_second_level_third_fig_data['hosname']:
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json(orient='split', date_format='iso')
anti_second_level_third_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
if anti_second_level_third_fig_data['btime'] != btime or anti_second_level_third_fig_data['etime'] != etime:
anti_second_level_third_fig = get_second_level_third_fig_date(engine, btime, etime)
anti_second_level_third_fig_data['anti_second_level_third_fig'] = anti_second_level_third_fig.to_json(orient='split', date_format='iso')
anti_second_level_third_fig_data['btime'] = btime
anti_second_level_third_fig_data['etime'] = etime
anti_second_level_third_fig_data = json.dumps(anti_second_level_third_fig_data)
else:
anti_second_level_third_fig = pd.read_json( anti_second_level_third_fig_data['anti_second_level_third_fig'], orient='split')
anti_second_level_third_fig_data = dash.no_update
anti_second_level_third_fig = anti_second_level_third_fig.sort_values(['月份'])
fig = px.bar(anti_second_level_third_fig, x="月份", y="num", color='抗菌药物等级', color_discrete_sequence=px.colors.qualitative.Dark24)
fig.update_layout(
margin=dict(l=30, r=30, t=30, b=30),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
),
)
fig.update_yaxes(title_text="医嘱数量", )
fig.update_xaxes(title_text="月份", )
return fig,anti_second_level_third_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-3 figure 1 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data for the first level-3 bacteria-detection figure
def get_third_level_first_fig_date(engine,btime,etime):
res = | pd.read_sql(f"""select substr(REQUESTTIME,1,7) as month,BACTERIA as 菌,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}'
group by BACTERIA, substr(REQUESTTIME,1,7)
""",con=engine) | pandas.read_sql |
import os
import os.path as path
import sys
sys.path.append(path.dirname(path.abspath(__file__)))
import numpy as np
import pandas as pd
import concurrent.futures
import argparse
import json
import traceback
import tracemalloc
from functools import reduce
import pyFigure
def computeGasPhaseO2Conc(df):
#df is a grouped dataframe
df_gas=df[df["eps"]>1.0-1e-6]
if(df_gas.shape[0]==0):
return 0.0
else:
num=df_gas.shape[0]
O2Conc=np.sum(df_gas["O2Conc"])/num
return O2Conc
def computeAverageCokeFraction(df):
num=df.shape[0]
df_coke=df[df["coke"]>1e-6]
if(df_coke.shape[0]==0):
return 0.0
else:
average_coke_fraction=np.sum(df_coke["coke"])/num
return average_coke_fraction
def computeAverageQdot(df):
num=df.shape[0]
df_coke=df[df["coke"]>1e-6]
if(df_coke.shape[0]==0):
return 0.0
else:
average_Qdot=np.sum(df_coke["Qdot"])/num
return average_Qdot
def ComputeO2Flux(df):
#df is a grouped dataframe
df["O2AdvFlux"]=df["rho"]*df["U_0"]*df["O2"]
totalO2Flux=np.sum(df["O2AdvFlux"])
return totalO2Flux
def computeTransverselyAverages(df):
df_group=df.groupby("x")
df_meanT=df_group["T"].mean()
df_meanT=df_meanT.reset_index()
df_O2Conc=df_group.apply(computeGasPhaseO2Conc)
df_O2Conc=df_O2Conc.reset_index(name = "O2Conc")
df_mean_coke=df_group.apply(computeAverageCokeFraction)
df_mean_coke=df_mean_coke.reset_index(name = "coke")
df_mean_Qdot=df_group.apply(computeAverageQdot)
df_mean_Qdot=df_mean_Qdot.reset_index(name = "Qdot")
df_O2AdvFlux=df_group.apply(ComputeO2Flux)
df_O2AdvFlux=df_O2AdvFlux.reset_index(name = "O2AdvFlux")
dfs = [df_meanT, df_O2Conc, df_mean_coke, df_mean_Qdot,df_O2AdvFlux]
df_combined=reduce(lambda df_left,df_right: pd.merge(df_left, df_right),dfs)
return df_combined
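# Toy illustration of computeTransverselyAverages (added for clarity; the column
# names follow the fields used above and the two-cell values are made up):
def _transverse_average_toy_example():
    toy = pd.DataFrame({
        "x": [0.0, 0.0], "T": [300.0, 400.0], "eps": [1.0, 0.4],
        "O2Conc": [8.0, 2.0], "coke": [0.0, 0.2], "Qdot": [0.0, 5.0],
        "rho": [1.2, 1.2], "U_0": [1.0, 1.0], "O2": [0.23, 0.05],
    })
    # expect a single x=0 row: mean T 350, gas-phase O2Conc 8.0 (only the eps~1 cell),
    # coke 0.1 and Qdot 2.5 (averaged over both cells), O2AdvFlux 0.336
    return computeTransverselyAverages(toy)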
def computeTransverselyAveragesAndSave(df,save_path):
try:
df_result=computeTransverselyAverages(df)
df_result.to_csv(save_path,index=False)
except Exception as e:
errmsg=f"Unhandled exception happened: {e} with stack trace {traceback.format_exc()}"
print(errmsg)
return False
return True
def readAndcomputeTransverselyAveragesAndSave(data_folder,time,save_folder):
df=pyFigure.read_data_and_process(data_folder,time)
save_path=os.path.join(save_folder,f"{time}.csv")
print(f"save transversely averaged data to :{save_path}")
ret=computeTransverselyAveragesAndSave(df,save_path)
return ret
def batchComputeTransverselyAveragesForAll(data_folder,save_folder,worker_num=8):
time_names=pyFigure.get_times_from_data_folder(data_folder)
batchComputeTransverselyAverages(data_folder, save_folder, time_names, worker_num)
def batchComputeTransverselyAverages(data_folder, save_folder, time_names,worker_num=8):
print(f"time names: {time_names}")
print(f"save folders: {save_folder}")
if not os.path.exists(save_folder):
os.mkdir(save_folder)
futures=[]
results=[]
with concurrent.futures.ProcessPoolExecutor(max_workers=worker_num) as executor:
for time in time_names:
future=executor.submit(readAndcomputeTransverselyAveragesAndSave,\
data_folder,time,save_folder)
futures.append(future)
for _, future in enumerate(futures):
ret=future.result()
results.append(ret)
results=np.array(results)
print(f"processed time number: {results.shape[0]}, succeed number: {np.sum(results)}")
def computeMaxTemperatureAndOutletO2ConcHistory(min_max_file_path,transverse_data_folder):
times=pyFigure.get_times_from_data_folder(transverse_data_folder)
results=[]
for time in times:
df=pd.read_csv(f"{transverse_data_folder}/{time}.csv")
xmax=np.max(df["x"])
Tmax=np.max(df["T"])
O2ConcAtOutlet=list((df[df["x"]==xmax])["O2Conc"])[0]
ret={"Time":float(time),"Transverse_Tmax":Tmax,"O2ConcAtOutlet":O2ConcAtOutlet}
results.append(ret)
df_transverse_data=pd.DataFrame(results)
df_min_max=pyFigure.read_min_max_field(min_max_file_path,1,"T")
df_min_max=df_min_max[["Time","max"]]
df_combined= | pd.merge(df_min_max,df_transverse_data) | pandas.merge |
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python
__doc__ = \
"""
assay/compound querying
========================================================================
run the following command on the zscaled file
Example (PRED_DIR is the location of ZscaledAllAssays)
#time grep -aob ".$" $PRED_DIR/ZscaledAllAssays.csv > $PRED_DIR/indices
"""
class QueryCmpd:
"""
input: mtable_dir, e.g., PLS_Predictions
index file: MasterTable_indices
RFR predicted master table: MasterTable.csv
"""
def __init__(self, mtable_dir, uncertainty=False):
# initialize indices and MasterTable files
if uncertainty == True:
self.index_file = '{}/RFR_Predictions/ZscaledAllAssaysCID_indices'.format(mtable_dir)
self.MasterTable = '{}/RFR_Predictions/ZscaledAllAssaysCID.csv'.format(mtable_dir)
else:
self.index_file = '{}/PLS_Predictions/ZscaledAllAssaysCID_indices'.format(mtable_dir)
self.MasterTable = '{}/PLS_Predictions/ZscaledAllAssaysCID.csv'.format(mtable_dir)
self.indices = joblib.load(self.index_file)
self.fid = open(self.MasterTable)
self.separator = ',' if self.MasterTable.endswith('.csv') else '\t'
def columns(self):
self.fid.seek(0, 0)
line = self.fid.readline().strip().split(self.separator)
col = line[1: ]
return(col)
def idx(self):
return(list(self.indices.index)[1: ])
def get(self, cmpd, raw=False):
index = self.indices.loc[cmpd].values[0]
self.fid.seek(index, 0)
line = self.fid.readline().strip().split(self.separator)
line_name = line[0]
if raw:
return(line_name, line[1: ])
line_data = [float(x) if x != '' else 0.0 for x in line[1: ]]
return(line_name, line_data)
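# The class above avoids loading the huge master table: joblib-loaded byte offsets
# let get() seek() straight to one compound's row. A minimal pure-Python sketch of
# how such a row-offset index can be built (the production index is generated with
# the `grep -aob` command shown in the module docstring):
def _build_row_offsets_sketch(csv_path):
    offsets = {}
    with open(csv_path, 'rb') as fid:
        pos = fid.tell()
        line = fid.readline()
        while line:
            offsets[line.split(b',', 1)[0].decode()] = pos
            pos = fid.tell()
            line = fid.readline()
    return offsets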
class QueryAssay:
"""
input: mtable_dir, e.g., PLS_Predictions
index file: MasterTable_indices
RFR predicted master table: MasterTable.csv
"""
def __init__(self, mtable_dir, uncertainty=False):
# initialize indices and MasterTable files
if uncertainty == True:
self.index_file = '{}/RFR_Predictions/ZscaledAllAssaysAID_indices'.format(mtable_dir)
self.MasterTable = '{}/RFR_Predictions/ZscaledAllAssaysAID.csv'.format(mtable_dir)
else:
self.index_file = '{}/PLS_Predictions/ZscaledAllAssaysAID_indices'.format(mtable_dir)
self.MasterTable = '{}/PLS_Predictions/ZscaledAllAssaysAID.csv'.format(mtable_dir)
self.indices = joblib.load(self.index_file)
self.indices.index = self.indices.index.map(str)
self.fid = open(self.MasterTable)
self.separator = ',' if self.MasterTable.endswith('.csv') else '\t'
def columns(self):
self.fid.seek(0, 0)
line = self.fid.readline().strip().split(self.separator)
col = line[1: ]
return(col)
def idx(self):
return(list(self.indices.index)[1: ])
def get(self, assay, raw=False):
index = self.indices.loc[assay].values[0]
self.fid.seek(index, 0)
line = self.fid.readline().strip().split(self.separator)
line_name = line[0]
if raw:
return(line_name, line[1: ])
line_data = [float(x) for x in line[1: ]]
return(line_name, line_data)
class QueryCustomCSV():
def __init__(self, mtable, scale_stat, bool_ZpIC50):
df_csv = pd.read_csv(mtable, header=0, index_col=0, sep=',')
df_csv.index = df_csv.index.astype(str)
df_stat = pd.read_csv(scale_stat, index_col=0, header=0, sep=',')
if bool_ZpIC50 == True:
cols = list(df_csv.columns)
idx = [str(i) for i in df_stat.index]
if len(set(cols) - set(idx)) == 0:
cols = [int(c) for c in cols]
df_mean = df_stat.loc[cols, 'mean_pred'].to_numpy()
df_std = df_stat.loc[cols, 'stdev_pred'].to_numpy()
df_csv = df_csv.sub(df_mean, axis=1).div(df_std, axis=1).round(3)
self.df = df_csv
def columns(self):
col = list(self.df.columns)
return(col)
def get_column(self, assay=False, idx=False):
if idx:
return(list(self.df.index))
else:
return(self.df[assay])
def get(self, cmpd):
line_data = list(self.df.loc[cmpd])
return(cmpd, line_data)
# test the compound's loading time
def get_CA(p, p_col, assays, cmpds):
df_CA = pd.DataFrame(0.0, index=assays, columns=cmpds)
cmpd_new = []
for cmpd in cmpds:
try:
name, pqsar_vec = p.get(cmpd)
df_get = pd.DataFrame([float(s) for s in pqsar_vec], index=p_col, columns=[name])
df_CA[cmpd] = df_get.loc[assays]
except:
if cmpd in list(df_CA.columns):
cmpd_new.append(cmpd)
print('Warning! {} not found'.format(cmpd))
df_CA.drop(cmpd_new, axis=1, inplace=True)
return(df_CA)
def get_list(input_list):
# get the query list
separator = ',' if input_list.endswith('.csv') else '\t'
df = pd.read_csv(input_list, header=0, index_col=0, sep=separator)
items = [str(s) for s in set(df.index)]
return(items)
def get_stat(df, thr, suffix='Zscore'):
stats = pd.DataFrame(0.0, index=df.index, columns=[])
col_name = list(df.columns)
stats['count_{}>{}'.format(suffix, thr)] = df[df[col_name] >= thr].count(axis=1)
stats['min_{}'.format(suffix)] = df.min(axis=1)
stats['mean_{}'.format(suffix)] = df.mean(axis=1).round(3)
stats['max_{}'.format(suffix)] = df.max(axis=1)
return(stats)
def check_AID(items, p_col):
real_items = [i for i in items if i in p_col]
if len(real_items) == 0:
print('Error! No AID was found')
sys.exit(1)
if len(real_items) < len(items):
fake_items = [i for i in items if i not in real_items]
print('Warning! AID {} not found'.format(fake_items))
return(real_items)
def scale2pIC50(scale_stat, df, row_AID=True, uncertainty=False):
df_scale = pd.read_csv(scale_stat, dtype={'AID':str}, index_col=0, header=0, sep=',')
#df_scale.reindex([str(d) for d in df_scale.index])
df_scale.index = df_scale.index.map(str)
if row_AID:
df = df.T.copy()
cols = list(df.columns)
df_std = df_scale.loc[cols, 'stdev_pred']
if uncertainty == True:
df = df * df_std
else:
df_mean = df_scale.loc[cols, 'mean_pred']
df = df * df_std + df_mean
if row_AID:
df = df.T
# output: cpds in the row, AID in the column
return(df)
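# Worked example of the un-scaling above (illustrative numbers, not real assay
# statistics): a z-scaled prediction of 2.0 for an assay with stdev_pred 0.4 and
# mean_pred 6.5 maps back to pIC50 = 2.0 * 0.4 + 6.5 = 7.3; with uncertainty=True
# only the spread is rescaled, so the same 2.0 becomes an error of 2.0 * 0.4 = 0.8.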
def tidy_view(df, args):
# Move the summary columns (stdev_pred, mean_pred) to the end, sort the rows (descending) by count_*>threshold and mean_*,
# and sort the columns (descending) by the same count when the columns are assays.
# If the columns are compounds, the per-column count is computed below in pandas (the equivalent of Excel's =COUNTIF()).
cols = list(df.columns)
for c in cols:
if 'count_Zscore' in c or 'count_pIC50' in c:
count_Zscore = c
if 'mean_Zscore' in c or 'mean_pIC50' in c:
mean_Zscore = c
df.sort_values(by=[count_Zscore, mean_Zscore], axis=0, inplace=True, ascending=False)
cpd_idx = []
if args.Assay or args.CA:
df.sort_values(by=[count_Zscore, mean_Zscore], axis=1, inplace=True, ascending=False)
for idx in df.index:
if 'count_Zscore' in idx or 'count_pIC50' in idx:
break
cpd_idx.append(idx)
else:
# args.Compound
cpds = []
for c in cols:
if 'count_Zscore' in c or 'count_pIC50' in c:
break
cpds.append(c)
AID = list(df.index)
thr = float(count_Zscore.split('>')[1])
df.loc['count_Zscore'] = 0.0
df.loc['count_Zscore'] = df.loc[AID, cpds][df.loc[AID, cpds] >= thr].count()
df.loc['count_Zscore'] = [float(s) for s in df.loc['count_Zscore']]
df = df.sort_values(by='count_Zscore', axis=1, ascending=False)
df.drop('count_Zscore', axis=0, inplace=True)
#tobe_moved = ['R^2_RF(ext)', 'stdev_pred', 'mean_pred', 'validity', 'delta_error_rate', 'efficiency', 'wt']
tobe_moved = ['stdev_pred', 'mean_pred']
cols = list(df.columns)
left_cols = [c for c in cols if c not in tobe_moved]
left_cols.extend(tobe_moved)
df = df[left_cols]
return(df, cpd_idx)
def save_query(df, args, out_csv, scale_stat, bool_ZpIC50):
cols = []
rows = []
for c in df.columns:
if 'count_' in c:
break
cols.append(c)
cols_left = [c for c in df.columns if c not in cols]
for i in df.index:
if 'count_' in i:
break
rows.append(i)
if args.Uncertainty.lower() in ['true', 'yes', 't', 'y']:
if args.Local:
local_dir, basename = os.path.dirname(args.Local), os.path.basename(args.Local)
local_csv_PI = os.path.join(local_dir, basename.split(',')[1])
u = QueryCustomCSV(local_csv_PI, scale_stat, False)
u_col = u.columns()
else:
u = QueryCmpd(args.Directory, uncertainty=True)
u_col = u.columns()
df_error = get_CA(u, u_col, rows, cols) if args.Compound else get_CA(u, u_col, cols, rows)
df_error = scale2pIC50(scale_stat, df_error, uncertainty=True) if bool_ZpIC50 == False else df_error
df_error = df_error if args.Compound else df_error.T
error_cols = list(df_error.columns)
error_cols = [col + '_Error' for col in error_cols]
df_error.columns = error_cols
df = df.merge(df_error, how='left', right_index=True, left_index=True)
assert len(cols) == len(error_cols)
cols_all = sum([[c, e] for c, e in zip(cols, error_cols)], []) + cols_left
df = df[cols_all]
if args.Compound:
df.round(2).to_csv(out_csv, encoding='utf-8')
else:
df.round(2).loc[rows].to_csv(out_csv, mode='a+', encoding='utf-8')
with open(out_csv, 'a+') as fid:
fid.writelines('\n')
df.drop(rows, axis=0).round(2).to_csv(out_csv, mode='a+', header=False, encoding='utf-8')
def main():
description = """Querying individual screening of pQSAR by CID and/or AID
***usage examples
***input file (.txt or .csv): querying by compounds or assays, a header line followed by compounds or assays (one item per line)
querying by compounds and assays, a header line followed by CID at the first column and AID second column.
***querying by compounds (CID) with a threshold of 3 stdev above the mean
python grepMOA2.py -c -i cid.txt -d ../chembl_28 -t 3.0 -z True -o cid_out.csv
***querying by assays (AID) with a threshold of 3 stdev above the mean
python grepMOA2.py -a -i aid.txt -d ../chembl_28 -t 3.0 -z True -o aid_out.csv
***querying by compounds (CID) and assays (AID)
python grepMOA2.py -ca -i ca_id.txt -d ../chembl_28 -z True -o ca_id_out.csv
"""
epilog = """----------------------profile-QSAR application-----------------------
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
parser.add_argument('-i', '--Input', action='store', help='Input file with header line followed by querying compounds\' ID and/or assays\' AIDs', metavar='')
parser.add_argument('-d', '--Directory', help='Directory contains modeling information, default is chembl_28', type=str, metavar='')
parser.add_argument('-u', '--Uncertainty', help='Query the predicted uncertainty (Error) at 80 percent confidence level', type=str, default='False', metavar='True/False')
parser.add_argument('-l', '--Local', help='Local file (csv) of custom prediction', type=str, metavar='')
parser.add_argument('-e', '--Experimental', help='False(default): Querying predicted values; True: Querying experimental values (always returns pIC50, -z is not applicable)', type=str, default='False', metavar='True/False')
#parser.add_argument('-z', '--ZpIC50', help='Z scaled predictions of real ones', action='store_true')
parser.add_argument('-z', '--ZpIC50', help='True (default): Threshold and predictions in Z-scaled values; False: original pIC50 (log molar).', type=str, default='True', metavar='True/False')
parser.add_argument('-t', '--Threshold', help='Threshold to filter out unqualified screening', metavar='')
parser.add_argument('-o', '--Output', help='Output file in csv format', default='Query_Output.csv', metavar='')
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument('-c', '--Compound', action='store_true', help='Querying by compounds (one compound per row)')
index_group.add_argument('-a', '--Assay', action='store_true', help='Querying by assays (one assay per row)')
index_group.add_argument('-ca', '--CA', action='store_true', help='Querying by compounds(first column) and assays (second column)')
if len(sys.argv) < 4:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
Uncertainty = True if args.Uncertainty.lower() in ['true', 'yes', 't', 'y'] else False
if not args.Input:
print('Error: no input file ')
sys.exit(1)
# Default threshold is 3.0
#Thr = args.Threshold if args.Threshold else 3.0
# flag predicted uncertainty (error)
# Default output
bool_expr = True if args.Experimental.lower() in ['true', 'yes', 't', 'y'] else False
out_csv = args.Output if args.Output else 'Query_Output.csv'
if os.path.exists(out_csv):
os.remove(out_csv)
if bool_expr == False:
bool_ZpIC50 = True if args.ZpIC50.lower() in ['true', 'yes', 't', 'y'] else False
scale_stat = '{}/PLS_Predictions/ZscalingStats.csv'.format(args.Directory)
summary_table = '{}/Summary_pQSAR/SummaryPQSAR.csv'.format(args.Directory)
df_summary = | pd.read_csv(summary_table, index_col=0, header=0) | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
                raise AssertionError(
                    "invalid comparison [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
        # TODO: reversed comparisons (scalar on the left) do not yet behave as expected:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
import shlex
import subprocess
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from tstoolbox import tstoolbox, tsutils
output_peak_detection = tsutils.read_iso_ts(
b"""Datetime,0,0::peak,0::valley
2000-01-01 00:00:00,0,,
2000-01-01 01:00:00,0.258819,,
2000-01-01 02:00:00,0.5,,
2000-01-01 03:00:00,0.707107,,
2000-01-01 04:00:00,0.866025,,
2000-01-01 05:00:00,0.965926,,
2000-01-01 06:00:00,1,1,
2000-01-01 07:00:00,0.965926,,
2000-01-01 08:00:00,0.866025,,
2000-01-01 09:00:00,0.707107,,
2000-01-01 10:00:00,0.5,,
2000-01-01 11:00:00,0.258819,,
2000-01-01 12:00:00,1.22465e-16,,
2000-01-01 13:00:00,-0.258819,,
2000-01-01 14:00:00,-0.5,,
2000-01-01 15:00:00,-0.707107,,
2000-01-01 16:00:00,-0.866025,,
2000-01-01 17:00:00,-0.965926,,
2000-01-01 18:00:00,-1,,-1
2000-01-01 19:00:00,-0.965926,,
2000-01-01 20:00:00,-0.866025,,
2000-01-01 21:00:00,-0.707107,,
2000-01-01 22:00:00,-0.5,,
2000-01-01 23:00:00,-0.258819,,
"""
)
input_peak_detection = b"""Datetime,0
2000-01-01 00:00:00,0.0
2000-01-01 01:00:00,0.258819
2000-01-01 02:00:00,0.5
2000-01-01 03:00:00,0.707107
2000-01-01 04:00:00,0.866025
2000-01-01 05:00:00,0.965926
2000-01-01 06:00:00,1.0
2000-01-01 07:00:00,0.965926
2000-01-01 08:00:00,0.866025
2000-01-01 09:00:00,0.707107
2000-01-01 10:00:00,0.5
2000-01-01 11:00:00,0.258819
2000-01-01 12:00:00,1.22465e-16
2000-01-01 13:00:00,-0.258819
2000-01-01 14:00:00,-0.5
2000-01-01 15:00:00,-0.707107
2000-01-01 16:00:00,-0.866025
2000-01-01 17:00:00,-0.965926
2000-01-01 18:00:00,-1.0
2000-01-01 19:00:00,-0.965926
2000-01-01 20:00:00,-0.866025
2000-01-01 21:00:00,-0.707107
2000-01-01 22:00:00,-0.5
2000-01-01 23:00:00,-0.258819
"""
class TestPeakDetect(TestCase):
def setUp(self):
dindex = pd.date_range("2000-01-01T00:00:00", periods=24, freq="H")
self.ats = np.arange(0, 360, 15)
self.ats = np.sin(2 * np.pi * self.ats / 360)
self.ats = pd.DataFrame(self.ats, index=dindex)
self.ats = tsutils.memory_optimize(self.ats).astype("Float64")
self.compare = self.ats.copy()
self.compare = self.compare.join(
pd.Series(
np.zeros(len(self.ats)).astype("f"),
index=self.ats.index,
name="0::peak",
)
)
self.compare = self.compare.join(
pd.Series(
np.zeros(len(self.ats)).astype("f"),
index=self.ats.index,
name="0::valley",
)
)
self.compare.index.name = "Datetime"
self.compare["0::peak"] = np.nan
self.compare.loc[self.compare[0] == 1, "0::peak"] = 1
self.compare["0::valley"] = np.nan
self.compare.loc[self.compare[0] == -1, "0::valley"] = -1
self.compare = tsutils.memory_optimize(self.compare).astype("Float64")
def test_peak_rel_direct(self):
"""Test peak detection API using the default method."""
out = tstoolbox.peak_detection(
input_ts=self.ats, print_input=True, extrema="both"
)
assert_frame_equal(out, self.compare)
def test_peak_minmax_direct(self):
"""Test peak detection API using the minmax method."""
out = tstoolbox.peak_detection(
method="minmax",
window=3,
input_ts=self.ats,
print_input=True,
extrema="both",
)
assert_frame_equal(out, self.compare)
def test_peak_zero_crossing_direct(self):
"""Test peak detection API using the zero_crossing method."""
out = tstoolbox.peak_detection(
method="zero_crossing",
window=3,
input_ts=self.ats,
print_input=True,
extrema="both",
)
assert_frame_equal(out, self.compare)
# def test_peak_parabola_direct(self):
# out = tstoolbox.peak_detection(method='parabola',
# input_ts=self.ats,
# print_input=True,
# extrema='both')
# self.maxDiff = None
# assert_frame_equal(out, self.compare)
def test_peak_sine_direct(self):
"""Test peak detection API using the 'sine' method."""
out = tstoolbox.peak_detection(
method="sine", points=9, input_ts=self.ats, print_input=True, extrema="both"
)
assert_frame_equal(out, self.compare)
# CLI...
@staticmethod
def test_peak_rel_cli():
"""Test peak detection CLI using the default method."""
args = 'tstoolbox peak_detection --extrema="both" --print_input=True'
args = shlex.split(args)
out = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE
).communicate(input=input_peak_detection)[0]
out = tsutils.read_iso_ts(out)
assert_frame_equal(out, output_peak_detection)
@staticmethod
def test_peak_minmax_cli():
"""Test peak detection CLI using the minmax method."""
args = (
"tstoolbox peak_detection "
"--window=3 "
'--method="minmax" '
'--extrema="both" '
"--print_input=True"
)
args = shlex.split(args)
out = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE
).communicate(input=input_peak_detection)[0]
out = tsutils.read_iso_ts(out)
# input_peak_detection.to_csv("input.csv")
output_peak_detection.to_csv("output.csv")
out.to_csv("out.csv")
assert_frame_equal(out, output_peak_detection)
@staticmethod
def test_peak_zero_crossing_cli():
"""Test peak detection CLI using the zero_crossing method."""
args = (
"tstoolbox peak_detection "
'--method="zero_crossing" '
'--extrema="both" '
"--window=3 "
"--print_input=True"
)
args = shlex.split(args)
out = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE
).communicate(input=input_peak_detection)[0]
out = tsutils.read_iso_ts(out)
| assert_frame_equal(out, output_peak_detection) | pandas.testing.assert_frame_equal |
import tsfel
import numpy as np
import pandas as pd
from tsfresh import extract_features
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
import pickle
import numpy, scipy.io
acc_data = np.loadtxt(open("../original_data/acc_data.csv", "rb"), delimiter=",", skiprows=1)
gyro_data = np.loadtxt(open("../original_data/gyro_data.csv", "rb"), delimiter=",", skiprows=1)
bt_data = np.loadtxt(open("../original_data/bt_data.csv", "rb"), delimiter=",", skiprows=1)
data_acc_test = | pd.DataFrame(acc_data[:,0:3], columns=["acc_x", "acc_y", "acc_z"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime
from downscale.utils.decorators import timer_decorator
def select_range(month_begin, month_end, year_begin, year_end, date_begin, date_end):
import pandas as pd
if (month_end != month_begin) or (year_begin != year_end):
dates = pd.date_range(date_begin, date_end, freq='M')
iterator = zip(dates.day, dates.month, dates.year)
else:
dates = pd.to_datetime(date_end)
iterator = zip([dates.day], [dates.month], [dates.year])
return iterator
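# Illustrative sketch (not part of the original module): select_range yields
# (day, month, year) tuples for the month-end dates of the requested window,
# assuming date_begin/date_end are strings pandas can parse, e.g.
#   iterator = select_range(month_begin=1, month_end=3,
#                           year_begin=2019, year_end=2019,
#                           date_begin="2019-1-1", date_end="2019-3-31")
#   list(iterator)  # [(31, 1, 2019), (28, 2, 2019), (31, 3, 2019)]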
def select_range_7days_for_long_periods_prediction(begin="2017-8-2", end="2020-6-30", prm=None):
"""
    Take a date range (begin and end) and split it into 7-day windows,
    splitting any window that contains one of the excluded dates.
    Works only when at most one splitting date falls within a given week.
"""
begin = np.datetime64(pd.to_datetime(begin))
end = np.datetime64(pd.to_datetime(end))
# Define 7 days periods within date range
dates = pd.date_range(start=begin, end=end, freq="7D")
dates_shift = pd.date_range(start=begin, end=end, freq="7D").shift()
dates_shift = dates_shift.where(dates_shift <= end, [end])
# Split range around selected dates
if prm["GPU"]:
d1 = datetime.datetime(2017, 8, 1, 6)
d2 = datetime.datetime(2018, 8, 1, 6)
d3 = datetime.datetime(2019, 5, 1, 6)
d4 = datetime.datetime(2019, 6, 1, 6)
d5 = datetime.datetime(2020, 6, 2, 6)
splitting_dates = [np.datetime64(date) for date in [d1, d2, d3, d4, d5]]
else:
d1 = datetime.datetime(2017, 8, 1, 6)
d2 = datetime.datetime(2018, 8, 1, 6)
d3 = datetime.datetime(2019, 6, 1, 6)
d6 = datetime.datetime(2020, 7, 1, 6)
splitting_dates = [np.datetime64(date) for date in [d1, d2, d3, d6]]
begins = []
ends = []
for index, (begin, end) in enumerate(zip(dates.values, dates_shift.values)):
# Add one day to begin after first element
begin = begin if index == 0 else begin + np.timedelta64(1, "D")
end = end + np.timedelta64(23, "h")
if begin > end:
continue
split = False
for splt_date in splitting_dates:
# If date range needs to be splitted
if begin <= splt_date < end:
begins.append(begin)
ends.append(splt_date - np.timedelta64(1, "h"))
begins.append(splt_date)
ends.append(end)
split = True
# If we didn't split date range
if not split:
begins.append(begin)
ends.append(end)
begins = [pd.to_datetime(begin) for begin in begins]
ends = [ | pd.to_datetime(end) | pandas.to_datetime |
"""
This module enables construction of observed over expected pixels tables and
storing them inside a cooler.
It includes 2 functions.
expected_full - a convenience function that calculates cis and trans expected
and "stitches" them together. Such a stitched expected "covers" the entire
Hi-C heatmap and can easily be merged with the pixel table.
obs_over_exp_generator - a function/generator (lazy iterator) that merges the
pre-calculated full expected with the pixel table in clr and yields chunks of
the observed/expected pixel table. Such a "stream" can be used in cooler.create
as the "pixels" argument to write an obs/exp cooler file.
"""
import time
import logging
import numpy as np
import pandas as pd
import cooler
from cooler.tools import partition
from cooltools import (
expected_cis,
expected_trans
)
from cooltools.lib.common import (
assign_supports,
make_cooler_view
)
logging.basicConfig(level=logging.INFO)
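# Illustrative usage sketch (an assumption, not part of the original module;
# obs_over_exp_generator is defined further below and cooler.create_cooler is
# taken from the installed cooler package):
#
#   clr = cooler.Cooler("input.cool")
#   view = make_cooler_view(clr)
#   expected_df = expected_full(clr, view_df=view)
#   cooler.create_cooler(
#       "input.obs_over_exp.cool",
#       bins=clr.bins()[:],
#       pixels=obs_over_exp_generator(clr, expected_df),
#       ordered=True,
#   )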
def expected_full(
clr,
view_df=None,
smooth_cis=False,
aggregate_smoothed=False,
smooth_sigma=0.1,
aggregate_trans=False,
expected_column_name="expected",
ignore_diags=2,
clr_weight_name='weight',
chunksize=10_000_000,
nproc=4,
):
"""
Generate a DataFrame with expected for *all* 2D regions
tiling entire heatmap in clr.
Such 2D regions are defined as all pairwise combinations
of the regions in view_df. Average distance decay is calculated
    for every cis-region (e.g. inter- and intra-arms), and
    a "simple" average over each block is calculated for trans-
regions.
When sub-chromosomal view is provided, trans averages
can be aggregated back to the level of full chromosomes.
Parameters
----------
clr : cooler.Cooler
Cooler object
view_df : viewframe
expected is calculated for all pairwise combinations of regions
in view_df. Distance dependent expected is calculated for cis
regions, and block-level average is calculated for trans regions.
smooth_cis: bool
Apply smoothing to cis-expected. Will be stored in an additional column
aggregate_smoothed: bool
When smoothing cis expected, average over all regions, ignored without smoothing.
smooth_sigma: float
Control smoothing with the standard deviation of the smoothing Gaussian kernel.
Ignored without smoothing.
aggregate_trans : bool
Aggregate trans-expected at the inter-chromosomal level.
expected_column_name : str
        Name of the column in which to store the combined expected
    ignore_diags : int, optional
        Number of initial diagonals to exclude for calculation of distance dependent
expected.
clr_weight_name : str or None
Name of balancing weight column from the cooler to use.
Use raw unbalanced data, when None.
chunksize : int, optional
Size of pixel table chunks to process
nproc : int, optional
How many processes to use for calculation
Returns
-------
expected_df: pd.DataFrame
cis and trans expected combined together
"""
    # contacts vs distance - i.e. intra/cis expected
time_start = time.perf_counter()
cvd = expected_cis(
clr,
view_df=view_df,
intra_only=False, # get cvd for all 2D regions
smooth=smooth_cis,
smooth_sigma=smooth_sigma,
aggregate_smoothed=aggregate_smoothed,
clr_weight_name=clr_weight_name,
ignore_diags=ignore_diags,
chunksize=chunksize,
nproc=nproc,
)
time_elapsed = time.perf_counter() - time_start
logging.info(f"Done calculating cis expected in {time_elapsed:.3f} sec ...")
# contacts per block - i.e. inter/trans expected
time_start = time.perf_counter()
cpb = expected_trans(
clr,
view_df=view_df,
clr_weight_name=clr_weight_name,
chunksize=chunksize,
nproc=nproc,
)
# pretend that they also have a "dist"
# to make them mergeable with cvd
cpb["dist"] = 0
time_elapsed = time.perf_counter() - time_start
logging.info(f"Done calculating trans expected in {time_elapsed:.3f} sec ...")
# annotate expected_df with the region index and chromosomes
    view_label = (
        view_df
        .reset_index()
        .rename(columns={"index": "r"})
        .set_index("name")
    )
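    # view_label is indexed by region name and keeps each region's positional
    # index ("r") and chromosome; the chromosome lookup below is what allows
    # aggregating trans-expected back to whole chromosomes.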
# which expected column to use, based on requested "modifications":
cis_expected_name = "balanced.avg" if clr_weight_name else "count.avg"
if smooth_cis:
cis_expected_name = f"{cis_expected_name}.smoothed"
if aggregate_smoothed:
cis_expected_name = f"{cis_expected_name}.agg"
# copy to the prescribed column for the final output:
cvd[expected_column_name] = cvd[cis_expected_name].copy()
    # aggregate trans if requested and decide which trans-expected column to use:
trans_expected_name = "balanced.avg" if clr_weight_name else "count.avg"
if aggregate_trans:
trans_expected_name = f"{trans_expected_name}.agg"
additive_cols = ["n_valid","count.sum"]
if clr_weight_name:
additive_cols.append("balanced.sum")
# groupby chrom1, chrom2 and aggregate additive fields (sums and n_valid):
_cpb_agg = cpb.groupby(
[
view_label["chrom"].loc[cpb["region1"]].to_numpy(), # chrom1
view_label["chrom"].loc[cpb["region2"]].to_numpy(), # chrom2
]
)[additive_cols].transform("sum")
# recalculate aggregated averages:
cpb["count.avg.agg"] = _cpb_agg["count.sum"]/_cpb_agg["n_valid"]
if clr_weight_name:
cpb["balanced.avg.agg"] = _cpb_agg["balanced.sum"]/_cpb_agg["n_valid"]
# copy to the prescribed column for the final output:
cpb[expected_column_name] = cpb[trans_expected_name].copy()
# concatenate cvd and cpb (cis and trans):
expected_df = | pd.concat([cvd, cpb], ignore_index=True) | pandas.concat |
import pandas as pd
import os
import matplotlib.pyplot as plt
plt.rc('font', size=14)
import numpy as np
import seaborn as sns
sns.set(style='white')
sns.set(style='whitegrid', color_codes=True)
#
working_dir = '/Users/ljyi/Desktop/capstone/capstone8'
os.chdir(working_dir)
#
raw_data = pd.read_csv('moss_plos_one_data.csv')
raw_data.columns = raw_data.columns.str.replace('.', '_')
raw_data.shape
# (2217958, 62)
col_names = raw_data.columns.tolist()
#==============================================================================
# Data Preprocessing
#==============================================================================
# find missing values
df = raw_data
df.head()
df_nan = df.isnull().sum(axis=0).to_frame()
df_nan.columns=['counts']
col_nan = df_nan[df_nan['counts']>0]
col_nan_index = list(col_nan.index)
# find unique values in 'id'
id_unique = df['id'].unique().tolist()
id_unique
len(id_unique)
# 8105
# get train and test index based on unique 'id'
import random
random.seed(1)
train_id = random.sample(id_unique, 5674)
test_id = [avar for avar in id_unique if avar not in train_id]
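# 5674 of the 8105 unique ids (~70%) go to training; splitting on 'id' keeps
# every row of a given patient in the same partition.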
# get rid of variables with too many missing values
data_df = raw_data
drop_cols = ['n_evts', 'LOS', 'ICU_Pt_Days', 'Mort', 'age'] # why not age?
data_df.drop(col_nan_index, inplace=True, axis=1)
data_df.drop(drop_cols, inplace=True, axis=1)
# 'race' with three levels and 'svc' with four levels are categorical data
dummy_race = pd.get_dummies(data_df['race'])
data_df_dummy = pd.concat([data_df, dummy_race], axis=1)
data_df_dummy.drop(columns=['race', 'oth'], inplace=True, axis=1) # dummy variable trap
dummy_svc = pd.get_dummies(data_df['svc'])
df_svc_dummy = pd.concat([data_df_dummy, dummy_svc], axis=1)
df_svc_dummy.drop(columns=['svc', 'Other'], inplace=True, axis=1)
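# One dummy level is dropped from each categorical ('oth' for race, 'Other'
# for svc) so the remaining indicators are not perfectly collinear (the
# "dummy variable trap" noted above).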
list(df_svc_dummy.columns)
df_dummy = df_svc_dummy
# split data into training and testing sets
df_dummy.set_index('id', inplace=True)
X_y_train = df_dummy.loc[train_id]
X_y_test = df_dummy.loc[test_id]
# sample training set
true_index = np.where(X_y_train['y'].values.flatten() == True)[0]
false_index = np.where(X_y_train['y'].values.flatten() == False)[0]
random.seed(0)
selected_false_index = random.sample(list(false_index), len(true_index)*2)
train_index = list(np.append(true_index, selected_false_index))
#
#true_index = np.where(X_y_test['y'].values.flatten() == True)[0]
#false_index = np.where(X_y_test['y'].values.flatten() == False)[0]
#random.seed(0)
#selected_false_index = random.sample(list(false_index), len(true_index)*2)
#test_index = list(np.append(true_index, selected_false_index))
#
X_train = X_y_train.iloc[train_index, X_y_train.columns != 'y']
y_train = X_y_train.iloc[train_index, X_y_train.columns == 'y']
X_test = X_y_test.iloc[:, X_y_test.columns != 'y']
y_test = X_y_test.iloc[:, X_y_test.columns == 'y']
y_test = y_test.values.flatten()
len(y_train)
#1520840
np.sum(y_train == True)
# 16391
np.sum(y_train == False)
# 1504449
np.sum(y_test == True)
# 7490
np.sum(y_test == False)
# 689628
train_col_names = X_train.columns
# over-sampling using SMOTE-Synthetic Minority Oversampling Technique
from imblearn.over_sampling import SMOTE
os = SMOTE(random_state=0)
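# SMOTE synthesizes new minority-class rows by interpolating between existing
# minority samples and their nearest neighbours. Note that this `os` variable
# shadows the `os` module imported above, and newer imbalanced-learn releases
# rename fit_sample to fit_resample.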
os_data_X, os_data_y = os.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X, columns=train_col_names)
os_data_y = pd.DataFrame(data=os_data_y, columns=['y'])
# check the lengths of data now
os_data_X.shape
# (2996702, 55)
len(os_data_y)
# 2996702
# percent of True
n_total = len(os_data_y)
n_true = sum(os_data_y['y']==True)
n_true
# 1498351 (before oversampling: 23881)
n_false = sum(os_data_y['y']==False)
n_false
# 1498351 (before oversampling:2194077)
pct_true = n_true/n_total
pct_true
# 0.5
# 50% are event
pct_false = n_false/n_total
pct_false
# 0.5
# 50% are non-event
# here, the ratio of event to non-event is 1:1 after SMOTE.
# Final data for training
X_train_balanced = os_data_X
y_train_balanced = os_data_y
n_rows_total = len(y_train_balanced)
#n_rows_total_ls = range(n_rows_total)
random.seed(1)
#sample_rows_index = random.sample(n_rows_total_ls, 100000)
X_train_df = X_train_balanced
y_train_sample = y_train_balanced
y_train_sample = y_train_sample.values.flatten()
# feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_sample = sc.fit_transform(X_train_df)
X_test = sc.transform(X_test)
type(X_train_sample)
#==============================================================================
# KNN
#==============================================================================
# ------------------------ Weighted KNN ---------------------------------------
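# Weighted KNN idea: load per-feature importances produced by a random forest
# ('Feature Importance.csv') and rebuild the scaled arrays as DataFrames so
# each feature can be weighted by its importance before computing KNN
# distances.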
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import roc_auc_score
# weight all features using random forest importance
feature_importance_forest = pd.read_csv('Feature Importance.csv',names = ['name','importance'])
X_train_knn = pd.DataFrame(X_train_sample, columns=X_train_df.columns) # array to dataframe
X_test_knn = | pd.DataFrame(X_test, columns=X_train_df.columns) | pandas.DataFrame |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test utilities.
"""
from matched_markets.methodology import common_classes
from matched_markets.methodology import utils
import altair as alt
import numpy as np
import pandas as pd
import unittest
TimeWindow = common_classes.TimeWindow
class UtilsTest(unittest.TestCase):
def testRandomizeStrata(self):
"""Check that randomize_strata() works."""
# Mappings are possible even when number of items is 1.
self.assertEqual(utils.randomize_strata(1, [1]), [1])
self.assertLess(set(utils.randomize_strata(1, [1, 2])), {1, 2})
# Mappings are possible even when number of items <= number of groups.
self.assertEqual(utils.randomize_strata(2, [1]), [1, 1])
self.assertEqual(utils.randomize_strata(3, [1]), [1, 1, 1])
# Check that the mapping contains the expected group ids.
self.assertCountEqual(utils.randomize_strata(2, [1, 2]), [1, 2])
self.assertCountEqual(utils.randomize_strata(4, [1, 2]), [1, 2] * 2)
self.assertCountEqual(utils.randomize_strata(30, [1, 2, 3]), [1, 2, 3] * 10)
# Mappings are possible also when the number of items is not a multiple of
# groups.
groups = utils.randomize_strata(4, [1, 2, 3])
self.assertTrue(len(groups) == 4) # pylint: disable=g-generic-assert
self.assertEqual(set(groups), set([1, 2, 3]))
# String-valued group ids are possible.
self.assertCountEqual(utils.randomize_strata(30, ['a', 'b', 'c']),
['a', 'b', 'c'] * 10)
def testBrownianBridgeBounds(self):
"""Check that brownian_bridge_bounds() are calculated correctly."""
with self.assertRaisesRegex(ValueError, 'n must be >= 1'):
utils.brownian_bridge_bounds(0, 1)
with self.assertRaisesRegex(ValueError, 'sd_bound_multiplier must be > 0'):
utils.brownian_bridge_bounds(1, 0)
with self.assertRaisesRegex(ValueError, 'sd_bound_multiplier must be > 0'):
utils.brownian_bridge_bounds(1, -1)
# Unit standard deviation.
self.assertEqual(utils.brownian_bridge_bounds(1, 1), [0.0])
self.assertEqual(utils.brownian_bridge_bounds(2, 1), [np.sqrt(0.5), 0.0])
expected_one = utils.brownian_bridge_bounds(3, 1)
self.assertAlmostEqual(expected_one[0], np.sqrt(2.0 / 3.0))
self.assertAlmostEqual(expected_one[1], np.sqrt(2.0 / 3.0))
self.assertAlmostEqual(expected_one[2], 0)
# S.d. not equal to 1.
self.assertEqual(utils.brownian_bridge_bounds(2, 2), [np.sqrt(2.0), 0.0])
expected_two = utils.brownian_bridge_bounds(3, np.sqrt(3))
self.assertAlmostEqual(expected_two[0], np.sqrt(2))
self.assertAlmostEqual(expected_two[1], np.sqrt(2))
self.assertAlmostEqual(expected_two[2], 0)
def testCredibleIntervalWholeNumbers(self):
simulations = np.arange(1, 101)
level = 0.9
expected = np.array([5.0, 50.0, 95.0])
obtained = utils.credible_interval(simulations, level)
np.testing.assert_array_almost_equal(expected, obtained)
def testCredibleIntervalInterpolation(self):
simulations = np.arange(1, 101)
level = 0.88
expected = np.array([6.0, 50.0, 94.0])
obtained = utils.credible_interval(simulations, level)
np.testing.assert_array_almost_equal(expected, obtained)
def testCredibleIntervalRaisesOnLargeLevel(self):
simulations = np.arange(1, 101)
level = 0.999
with self.assertRaises(ValueError):
utils.credible_interval(simulations, level)
def testFindDaysToExclude(self):
day_week_exclude = [
'2020/10/10', '2020/11/10-2020/12/10', '2020/08/10']
days_to_remove = utils.find_days_to_exclude(day_week_exclude)
expected_days = [
TimeWindow(pd.Timestamp('2020-10-10'), pd.Timestamp('2020-10-10')),
TimeWindow(pd.Timestamp('2020-11-10'), pd.Timestamp('2020-12-10')),
TimeWindow(pd.Timestamp('2020-08-10'), pd.Timestamp('2020-08-10')),
]
for x in range(len(expected_days)):
self.assertEqual(days_to_remove[x].first_day, expected_days[x].first_day)
self.assertEqual(days_to_remove[x].last_day, expected_days[x].last_day)
def testWrongDateFormat(self):
incorrect_day = ['2020/13/13', '2020/03/03']
with self.assertRaises(ValueError):
utils.find_days_to_exclude(incorrect_day)
incorrect_time_window = ['2020/10/13 - 2020/13/11', '2020/03/03']
with self.assertRaises(ValueError):
utils.find_days_to_exclude(incorrect_time_window)
incorrect_format = ['2020/10/13 - 2020/13/11 . 2020/10/10']
with self.assertRaises(ValueError):
utils.find_days_to_exclude(incorrect_format)
def testExpandTimeWindows(self):
day_week_exclude = [
'2020/10/10', '2020/11/10-2020/12/10', '2020/08/10']
days_to_remove = utils.find_days_to_exclude(day_week_exclude)
periods = utils.expand_time_windows(days_to_remove)
expected = [
pd.Timestamp('2020-10-10', freq='D'),
pd.Timestamp('2020-08-10', freq='D'),
]
expected += pd.date_range(start='2020-11-10', end='2020-12-10', freq='D')
self.assertEqual(len(periods), len(expected))
for x in periods:
self.assertIn(x, expected)
def testHumanReadableFormat(self):
numbers = [123, 10765, 13987482, 8927462746, 1020000000000]
numb_formatted = [
utils.human_readable_number(num) for num in numbers
]
self.assertEqual(numb_formatted, ['123', '10.8K', '14M', '8.93B', '1.02tn'])
def testDefaultGeoAssignment(self):
geo_level_time_series = pd.DataFrame({
'geo': [1, 2, 3, 4],
'response': [1.1, 2.2, 3.3, 4.4]
})
geo_eligibility = pd.DataFrame({
'geo': [1, 3],
'control': [1, 0],
'treatment': [0, 1],
'exclude': [0, 0]
})
updated_eligibility = utils.default_geo_assignment(geo_level_time_series,
geo_eligibility)
self.assertTrue(
updated_eligibility.equals(
pd.DataFrame({
'geo': [1, 2, 3, 4],
'control': [1, 1, 0, 1],
'treatment': [0, 1, 1, 1],
'exclude': [0, 1, 0, 1]
})))
def testPlotIroasOverTime(self):
iroas_df = pd.DataFrame({
'date': [
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
],
'lower': [0, 0.5, 1, 1.5, 2],
'mean': [1, 1.5, 2, 2.5, 3],
'upper': [2, 2.5, 3, 3.5, 4]
})
experiment_dates = pd.DataFrame({
'date': ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04'],
'color': [
'Pretest period', 'Pretest period', 'Experiment period',
'Experiment period'
]
})
cooldown_date = pd.DataFrame({
'date': ['2020-01-05'],
'color': ['End of cooldown period']
})
iroas_chart = utils.plot_iroas_over_time(iroas_df, experiment_dates,
cooldown_date)
self.assertIsInstance(iroas_chart, alt.LayerChart)
def testFindFrequency(self):
dates = list( | pd.date_range(start='2020-01-01', end='2020-02-01', freq='D') | pandas.date_range |
#Rule 24 - Description and text cannot be the same.
def description_text(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
file_name="Description_text_not_same.py"
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Description_text_not_same"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
print('true test-----------------------------------',files_to_apply=='ALL' , fleName + ".xlsx" in files_to_apply, files_to_apply=='ALL' or fleName + ".xlsx" in files_to_apply)
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index,row in df.iterrows():
text=row['TEXT']
description=row['DESCRIPTION']
if(description==text):
                entry=[index,fleName,'Both description and text have the same contents']
                print('The row '+str(index)+' in the file '+fleName+' has the same contents in both description and text')
data.append(entry)
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if(ExcelFile(target).sheet_names[0] == 'Sheet1'):
with | ExcelWriter(target, engine='openpyxl', mode='w') | pandas.ExcelWriter |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import tweepy
import re
import sys,csv
import pandas as pd
import numpy as np
import os
import nltk
import pycountry
import string
# In[2]:
from textblob import TextBlob
class SentimentAnalysis:
def __init__(self):
self.tweets = []
self.tweetText = []
def DownloadData(self):
# authenticating
consumerKey = 'jzn0NU9EviCRRbONbUXX9a8VN'
consumerSecret = '<KEY>'
accessToken = '<KEY>'
accessTokenSecret = '<KEY>'
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
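        # Standard tweepy v1.x OAuth 1a flow; the <KEY> values above are
        # redacted placeholders for the real consumer/access secrets.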
# input for term to be searched and how many tweets to search
searchTerm = input("Enter Keyword/Tag to search about: ")
NoOfTerms = int(input("Enter how many tweets to search: "))
# searching for tweets
self.tweets = tweepy.Cursor(api.search, q=searchTerm, lang = "en").items(NoOfTerms)
# Open/create a file to append data to
csvFile = open('result.csv', 'a')
# Use csv writer
csvWriter = csv.writer(csvFile)
# creating some variables to store info
polarity = 0
positive = 0
wpositive = 0
spositive = 0
negative = 0
wnegative = 0
snegative = 0
neutral = 0
# iterating through tweets fetched
for tweet in self.tweets:
#Append to temp so that we can store in csv later. I use encode UTF-8
self.tweetText.append(self.cleanTweet(tweet.text).encode('utf-8'))
# print (tweet.text.translate(non_bmp_map)) #print tweet's text
analysis = TextBlob(tweet.text)
# print(analysis.sentiment) # print tweet's polarity
polarity += analysis.sentiment.polarity # adding up polarities to find the average later
if (analysis.sentiment.polarity == 0): # adding reaction of how people are reacting to find average later
neutral += 1
elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3):
wpositive += 1
elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6):
positive += 1
elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1):
spositive += 1
elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0):
wnegative += 1
elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3):
negative += 1
elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6):
snegative += 1
# Write to csv and close csv file
csvWriter.writerow(self.tweetText)
csvFile.close()
# finding average of how people are reacting
positive = self.percentage(positive, NoOfTerms)
wpositive = self.percentage(wpositive, NoOfTerms)
spositive = self.percentage(spositive, NoOfTerms)
negative = self.percentage(negative, NoOfTerms)
wnegative = self.percentage(wnegative, NoOfTerms)
snegative = self.percentage(snegative, NoOfTerms)
neutral = self.percentage(neutral, NoOfTerms)
# finding average reaction
polarity = polarity / NoOfTerms
# printing out data
print("How people are reacting on " + searchTerm + " by analyzing " + str(NoOfTerms) + " tweets.")
print()
print("General Report: ")
if (polarity == 0):
print("Neutral")
elif (polarity > 0 and polarity <= 0.3):
print("Weakly Positive")
elif (polarity > 0.3 and polarity <= 0.6):
print("Positive")
elif (polarity > 0.6 and polarity <= 1):
print("Strongly Positive")
elif (polarity > -0.3 and polarity <= 0):
print("Weakly Negative")
elif (polarity > -0.6 and polarity <= -0.3):
print("Negative")
elif (polarity > -1 and polarity <= -0.6):
print("Strongly Negative")
print()
print("Detailed Report: ")
print(str(positive) + "% people thought it was positive")
print(str(wpositive) + "% people thought it was weakly positive")
print(str(spositive) + "% people thought it was strongly positive")
print(str(negative) + "% people thought it was negative")
print(str(wnegative) + "% people thought it was weakly negative")
print(str(snegative) + "% people thought it was strongly negative")
print(str(neutral) + "% people thought it was neutral")
self.plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, NoOfTerms)
def cleanTweet(self, tweet):
# Remove Links, Special Characters etc from tweet
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
# function to calculate percentage
def percentage(self, part, whole):
temp = 100 * float(part) / float(whole)
return format(temp, '.2f')
def plotPieChart(self, positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, noOfSearchTerms):
labels = ['Positive [' + str(positive) + '%]', 'Weakly Positive [' + str(wpositive) + '%]','Strongly Positive [' + str(spositive) + '%]', 'Neutral [' + str(neutral) + '%]',
'Negative [' + str(negative) + '%]', 'Weakly Negative [' + str(wnegative) + '%]', 'Strongly Negative [' + str(snegative) + '%]']
sizes = [positive, wpositive, spositive, neutral, negative, wnegative, snegative]
colors = ['yellowgreen','lightgreen','darkgreen', 'gold', 'red','lightsalmon','darkred']
patches, texts = plt.pie(sizes, colors=colors, startangle=90)
plt.legend(patches, labels, loc="best")
plt.title('How people are reacting on ' + searchTerm + ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.')
plt.axis('equal')
plt.tight_layout()
plt.show()
if __name__== "__main__":
sa = SentimentAnalysis()
sa.DownloadData()
# In[3]:
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
#!pip install nltk.sentiment.vader
#from nltk.sentiment.vader import SentimentIntensityAnalyzer
consumerKey = 'jzn0NU9EviCRRbONbUXX9a8VN'
consumerSecret = '<KEY>'
accessToken = '<KEY>'
accessTokenSecret = '<KEY>'
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
def percentage(part,whole):
return 100 * float(part)/float(whole)
keyword = input("Please enter keyword or hashtag to search: ")
noOfTweet = int(input ("Please enter how many tweets to analyze: "))
tweets = tweepy.Cursor(api.search, q=keyword).items(noOfTweet)
positive = 0
negative = 0
neutral = 0
polarity = 0
tweet_list = []
neutral_list = []
negative_list = []
positive_list = []
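# classify each tweet by comparing VADER's negative and positive component scores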
for tweet in tweets:
#print(tweet.text)
tweet_list.append(tweet.text)
analysis = TextBlob(tweet.text)
score = SentimentIntensityAnalyzer().polarity_scores(tweet.text)
neg = score['neg']
neu = score['neu']
pos = score['pos']
comp = score['compound']
polarity += analysis.sentiment.polarity
if neg > pos:
negative_list.append(tweet.text)
negative += 1
elif pos > neg:
positive_list.append(tweet.text)
positive += 1
elif pos == neg:
neutral_list.append(tweet.text)
neutral += 1
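# convert the raw counts into percentages of the tweets analysed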
positive = percentage(positive, noOfTweet)
negative = percentage(negative, noOfTweet)
neutral = percentage(neutral, noOfTweet)
polarity = percentage(polarity, noOfTweet)
positive = format(positive, '.1f')
negative = format(negative, '.1f')
neutral = format(neutral, '.1f')
# In[4]:
#Number of Tweets (Total, Positive, Negative, Neutral)
tweet_list = pd.DataFrame(tweet_list)
neutral_list = pd.DataFrame(neutral_list)
negative_list = pd.DataFrame(negative_list)
positive_list = pd.DataFrame(positive_list)
print("total number: ",len(tweet_list))
print("positive number: ",len(positive_list))
print("negative number: ", len(negative_list))
print("neutral number: ",len(neutral_list))
# In[5]:
tweet_list
# In[6]:
tw_list = pd.DataFrame(tweet_list)
tw_list["text"] = tw_list[0]
tw_list
#Calculating Negative, Positive, Neutral and Compound values
tw_list[['polarity', 'subjectivity']] = tw_list['text'].apply(lambda Text: pd.Series(TextBlob(Text).sentiment))
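# strip retweet markers, mentions, URLs and punctuation, then lowercase the text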
remove_rt = lambda x: re.sub('RT @\w+: '," ",x)
rt = lambda x: re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",x)
tw_list["text"] = tw_list.text.map(remove_rt).map(rt)
tw_list["text"] = tw_list.text.str.lower()
tw_list.head(10)
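# re-score the cleaned text with VADER and store the label plus component scores per row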
for index, row in tw_list['text'].iteritems():
score = SentimentIntensityAnalyzer().polarity_scores(row)
neg = score['neg']
neu = score['neu']
pos = score['pos']
comp = score['compound']
if neg > pos:
tw_list.loc[index, 'sentiment'] = "negative"
elif pos > neg:
tw_list.loc[index, 'sentiment'] = "positive"
else:
tw_list.loc[index, 'sentiment'] = "neutral"
tw_list.loc[index, 'neg'] = neg
tw_list.loc[index, 'neu'] = neu
tw_list.loc[index, 'pos'] = pos
tw_list.loc[index, 'compound'] = comp
tw_list.head(10)
# Install the following packages if needed to run word cloud, stopwords, and image
# !pip install wordcloud
# !pip install stopwords
# !pip install image
# In[7]:
# 3. To get the tweets in a proper format, first let's create a DataFrame to store the extracted data.
df = pd.DataFrame(columns=["Date","User","IsVerified","Tweet","Likes","RT",'User_location'])
print(df)
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import WindowAggregator
from recipe_config_loading import get_windowing_params
@pytest.fixture
def columns():
class COLUMNS:
date = "Date"
category = "country"
aggregation = "value1_avg"
return COLUMNS
@pytest.fixture
def df(columns):
co2 = [315.58, 316.39, 316.79, 316.2]
country = ["first", "first", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="M")
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, columns.date: time_index})
return df
@pytest.fixture
def long_df(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 345, 234, 100, 299]
country = ["first", "first", "first", "first", "second", "second", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, columns.date: time_index})
return df
@pytest.fixture
def long_df_2(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10]
country = ["first", "first", "second", "second", "third", "third"]
country_2 = ["first", "first", "second", "second", "third", "third"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, "item": country_2, columns.date: time_index})
return df
@pytest.fixture
def long_df_3(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
country = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_2 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_3 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M")).append(pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, columns.category: country, "item": country_2, "store": country_3, columns.date: time_index})
return df
@pytest.fixture
def long_df_4(columns):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10, 2, 3]
country = ["first", "first", "second", "second", "third", "third", "first", "first"]
country_2 = ["first", "first", "second", "second", "third", "third", "second", "first"]
country_3 = ["first", "first", "second", "second", "third", "third", "third", "fourth"]
    time_index = pd.date_range("1-1-2020", periods=2, freq="M").append(pd.date_range("1-1-2020", periods=2, freq="M")).append(
        pd.date_range("1-1-2020", periods=2, freq="M")).append(pd.date_range("1-1-2020", periods=2, freq="M"))
    # assumed completion, mirroring long_df_3: assemble the frame from the three grouping columns
    df = pd.DataFrame.from_dict(
        {"value1": co2, "value2": co2, columns.category: country, "item": country_2, "store": country_3, columns.date: time_index})
    return df
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
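    # nan_as_null=True should turn the NaNs into nulls; nan_as_null=False should keep them as NaN values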
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
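    # hard-coded expected contents of the two hash partitions for this key column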
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # upcast numerical columns to float and restore NaNs, because
    # cudf.DataFrame.to_pandas() casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
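    # assigning a bare array should adopt the DataFrame's existing (non-default) index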
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() converts to a numpy array, which gives better
    # type compatibility for this comparison
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
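    # build one column per letter, optionally nulling half or all of its values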
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
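    # offset the 0..9 range by 1.0 so mod, floordiv and pow avoid zero operands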
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash can return a different value on each run, which sometimes
    # results in enc_with_name_arr and enc_arr being the same.
    # There is no better way to make hash return the same value,
    # so an integer name is used to get a constant value back from hash.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
# ignore_index is supported in v.1.0
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
                reason="pandas's failure here seems like a bug (in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
        pd.Series([0, 15, 10], index=[0, None, 9]),
# standard library imports
import os
import datetime
import re
import math
import copy
import collections
from functools import wraps
from itertools import combinations
import warnings
import pytz
import importlib
# anaconda distribution defaults
import dateutil
import numpy as np
import pandas as pd
# anaconda distribution defaults
# statistics and machine learning imports
import statsmodels.formula.api as smf
from scipy import stats
# from sklearn.covariance import EllipticEnvelope
import sklearn.covariance as sk_cv
# anaconda distribution defaults
# visualization library imports
import matplotlib.pyplot as plt
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.palettes import Category10, Category20c, Category20b
from bokeh.layouts import gridplot
from bokeh.models import Legend, HoverTool, tools, ColumnDataSource
# visualization library imports
hv_spec = importlib.util.find_spec('holoviews')
if hv_spec is not None:
import holoviews as hv
from holoviews.plotting.links import DataLink
else:
warnings.warn('Some plotting functions will not work without the '
'holoviews package.')
# pvlib imports
pvlib_spec = importlib.util.find_spec('pvlib')
if pvlib_spec is not None:
from pvlib.location import Location
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.pvsystem import retrieve_sam
from pvlib.modelchain import ModelChain
from pvlib.clearsky import detect_clearsky
else:
warnings.warn('Clear sky functions will not work without the '
'pvlib package.')
plot_colors_brewer = {'real_pwr': ['#2b8cbe', '#7bccc4', '#bae4bc', '#f0f9e8'],
'irr-poa': ['#e31a1c', '#fd8d3c', '#fecc5c', '#ffffb2'],
'irr-ghi': ['#91003f', '#e7298a', '#c994c7', '#e7e1ef'],
'temp-amb': ['#238443', '#78c679', '#c2e699', '#ffffcc'],
'temp-mod': ['#88419d', '#8c96c6', '#b3cde3', '#edf8fb'],
'wind': ['#238b45', '#66c2a4', '#b2e2e2', '#edf8fb']}
met_keys = ['poa', 't_amb', 'w_vel', 'power']
# The search strings for types cannot be duplicated across types.
type_defs = collections.OrderedDict([
('irr', [['irradiance', 'irr', 'plane of array', 'poa', 'ghi',
'global', 'glob', 'w/m^2', 'w/m2', 'w/m', 'w/'],
(-10, 1500)]),
('temp', [['temperature', 'temp', 'degrees', 'deg', 'ambient',
'amb', 'cell temperature', 'TArray'],
(-49, 127)]),
('wind', [['wind', 'speed'],
(0, 18)]),
('pf', [['power factor', 'factor', 'pf'],
(-1, 1)]),
('op_state', [['operating state', 'state', 'op', 'status'],
(0, 10)]),
('real_pwr', [['real power', 'ac power', 'e_grid'],
(-1000000, 1000000000000)]), # set to very lax bounds
('shade', [['fshdbm', 'shd', 'shade'], (0, 1)]),
('pvsyt_losses', [['IL Pmax', 'IL Pmin', 'IL Vmax', 'IL Vmin'],
(-1000000000, 100000000)]),
('index', [['index'], ('', 'z')])])
sub_type_defs = collections.OrderedDict([
('ghi', [['sun2', 'global horizontal', 'ghi', 'global',
'GlobHor']]),
('poa', [['sun', 'plane of array', 'poa', 'GlobInc']]),
('amb', [['TempF', 'ambient', 'amb']]),
('mod', [['Temp1', 'module', 'mod', 'TArray']]),
('mtr', [['revenue meter', 'rev meter', 'billing meter', 'meter']]),
('inv', [['inverter', 'inv']])])
irr_sensors_defs = {'ref_cell': [['reference cell', 'reference', 'ref',
'referance', 'pvel']],
'pyran': [['pyranometer', 'pyran']],
'clear_sky':[['csky']]}
columns = ['pts_after_filter', 'pts_removed', 'filter_arguments']
def update_summary(func):
"""
    Decorator that records how many points each filtering call removes.
    Captures the row count of df_flt before and after the wrapped method runs,
    along with the arguments it was called with, and appends the result to the
    summary and summary_ix attributes of the object the method is bound to.
    Todo
----
not in place
Check if summary is updated when function is called with inplace=False.
It should not be.
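    Examples
    --------
    Illustrative sketch only; the class below is hypothetical and simply has
    the attributes this decorator relies on (df, df_flt, name, summary, and
    summary_ix)::
        class FilterableData:
            def __init__(self, df):
                self.name = 'meas'
                self.df = df
                self.df_flt = df.copy()
                self.summary = []
                self.summary_ix = []
            @update_summary
            def filter_positive(self, column):
                # Keep only rows where the given column is positive.
                self.df_flt = self.df_flt[self.df_flt[column] > 0]
        data = FilterableData(pd.DataFrame({'power': [-1, 0, 5, 10]}))
        data.filter_positive('power')
        # data.summary now holds the remaining and removed point counts for
        # each filter call; data.summary_ix holds (name, method) index tuples.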
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
pts_before = self.df_flt.shape[0]
if pts_before == 0:
pts_before = self.df.shape[0]
self.summary_ix.append((self.name, 'count'))
self.summary.append({columns[0]: pts_before,
columns[1]: 0,
columns[2]: 'no filters'})
ret_val = func(self, *args, **kwargs)
arg_str = args.__repr__()
lst = arg_str.split(',')
arg_lst = [item.strip("'() ") for item in lst]
# arg_lst_one = arg_lst[0]
# if arg_lst_one == 'das' or arg_lst_one == 'sim':
# arg_lst = arg_lst[1:]
# arg_str = ', '.join(arg_lst)
kwarg_str = kwargs.__repr__()
kwarg_str = kwarg_str.strip('{}')
if len(arg_str) == 0 and len(kwarg_str) == 0:
arg_str = 'no arguments'
elif len(arg_str) == 0:
arg_str = kwarg_str
else:
arg_str = arg_str + ', ' + kwarg_str
pts_after = self.df_flt.shape[0]
pts_removed = pts_before - pts_after
self.summary_ix.append((self.name, func.__name__))
self.summary.append({columns[0]: pts_after,
columns[1]: pts_removed,
columns[2]: arg_str})
if pts_after == 0:
warnings.warn('The last filter removed all data! '
'Calling additional filtering or visualization '
'methods that reference the df_flt attribute will '
'raise an error.')
return ret_val
return wrapper
def cntg_eoy(df, start, end):
"""
    Shifts data before or after the new year to form a contiguous time period.
    This function shifts data from the end of the year a year back or data from
    the beginning of the year a year forward, to create a contiguous time period.
Intended to be used on historical typical year data.
If start date is in dataframe, then data at the beginning of the year will
be moved ahead one year. If end date is in dataframe, then data at the end
of the year will be moved back one year.
cntg (contiguous); eoy (end of year)
Parameters
----------
df: pandas DataFrame
Dataframe to be adjusted.
start: pandas Timestamp
Start date for time period.
end: pandas Timestamp
End date for time period.
Todo
----
Need to test and debug this for years not matching.
"""
if df.index[0].year == start.year:
df_beg = df.loc[start:, :]
df_end = df.copy()
df_end.index = df_end.index + pd.DateOffset(days=365)
df_end = df_end.loc[:end, :]
elif df.index[0].year == end.year:
df_end = df.loc[:end, :]
df_beg = df.copy()
df_beg.index = df_beg.index - pd.DateOffset(days=365)
df_beg = df_beg.loc[start:, :]
df_return = pd.concat([df_beg, df_end], axis=0)
ix_ser = df_return.index.to_series()
df_return['index'] = ix_ser.apply(lambda x: x.strftime('%m/%d/%Y %H %M'))
return df_return
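# Illustrative sketch of cntg_eoy (not part of the module); the 8760 dataframe
# and dates below are assumptions for demonstration only:
#     ix = pd.date_range('1/1/1990', periods=8760, freq='H')
#     tmy = pd.DataFrame({'GlobInc': 0.0}, index=ix)
#     start = pd.Timestamp('1990-10-01')
#     end = pd.Timestamp('1991-01-31')
#     contiguous = cntg_eoy(tmy, start, end)
#     # Oct-Dec 1990 followed by the January data shifted into 1991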
def spans_year(start_date, end_date):
"""
Returns boolean indicating if dates passes are in the same year.
Parameters
----------
start_date: pandas Timestamp
end_date: pandas Timestamp
"""
if start_date.year != end_date.year:
return True
else:
return False
def wrap_seasons(df, freq):
"""
Rearrange an 8760 so a quarterly groupby will result in seasonal groups.
Parameters
----------
df : DataFrame
Dataframe to be rearranged.
freq : str
        String pandas offset alias to specify aggregation frequency
for reporting condition calculation.
Returns
-------
DataFrame
Todo
----
Write unit test
BQ-NOV vs BQS vs QS
Need to review if BQ is the correct offset alias vs BQS or QS.
"""
check_freqs = ['BQ-JAN', 'BQ-FEB', 'BQ-APR', 'BQ-MAY', 'BQ-JUL',
'BQ-AUG', 'BQ-OCT', 'BQ-NOV']
mnth_int = {'JAN': 1, 'FEB': 2, 'APR': 4, 'MAY': 5, 'JUL': 7,
'AUG': 8, 'OCT': 10, 'NOV': 11}
if freq in check_freqs:
        warnings.warn('DataFrame index adjusted to be continuous through the '
                      'new year, but not returned or set to attribute for '
                      'user. This is not an issue if using RCs with '
                      'predict_capacities.')
if isinstance(freq, str):
mnth = mnth_int[freq.split('-')[1]]
else:
mnth = freq.startingMonth
year = df.index[0].year
mnths_eoy = 12 - mnth
mnths_boy = 3 - mnths_eoy
if int(mnth) >= 10:
str_date = str(mnths_boy) + '/' + str(year)
else:
str_date = str(mnth) + '/' + str(year)
tdelta = df.index[1] - df.index[0]
date_to_offset = df.loc[str_date].index[-1].to_pydatetime()
start = date_to_offset + tdelta
end = date_to_offset + pd.DateOffset(years=1)
if mnth < 8 or mnth >= 10:
df = cntg_eoy(df, start, end)
else:
df = cntg_eoy(df, end, start)
return df
else:
return df
def perc_wrap(p):
def numpy_percentile(x):
return np.percentile(x.T, p, interpolation='nearest')
return numpy_percentile
def perc_bounds(perc):
"""
perc_flt : float or tuple, default None
Percentage or tuple of percentages used to filter around reporting
irradiance in the irrRC_balanced function. Required argument when
irr_bal is True.
"""
if isinstance(perc, tuple):
perc_low = perc[0] / 100
perc_high = perc[1] / 100
else:
perc_low = perc / 100
perc_high = perc / 100
low = 1 - (perc_low)
high = 1 + (perc_high)
return (low, high)
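# For example (percentages are given as whole numbers):
#     perc_bounds(20)        # -> (0.8, 1.2)
#     perc_bounds((10, 30))  # -> (0.9, 1.3)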
def perc_difference(x, y):
"""
Calculate percent difference of two values.
"""
if x == y == 0:
return 0
else:
return abs(x - y) / ((x + y) / 2)
def check_all_perc_diff_comb(series, perc_diff):
"""
Check series for pairs of values with percent difference above perc_diff.
Calculates the percent difference between all combinations of two values in
the passed series and checks if all of them are below the passed perc_diff.
Parameters
----------
series : pd.Series
Pandas series of values to check.
perc_diff : float
Percent difference threshold value as decimal i.e. 5% is 0.05.
Returns
-------
bool
"""
c = combinations(series.__iter__(), 2)
return all([perc_difference(x, y) < perc_diff for x, y in c])
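# For example, three irradiance readings within a 5% pairwise spread
# (values are illustrative only):
#     ser = pd.Series([400.0, 405.0, 410.0])
#     check_all_perc_diff_comb(ser, 0.05)  # True; all pairwise differences < 5%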
def sensor_filter(df, perc_diff):
"""
Check dataframe for rows with inconsistent values.
Applies check_all_perc_diff_comb function along rows of passed dataframe.
Parameters
----------
df : pandas DataFrame
perc_diff : float
Percent difference as decimal.
"""
if df.shape[1] >= 2:
bool_ser = df.apply(check_all_perc_diff_comb, perc_diff=perc_diff,
axis=1)
return df[bool_ser].index
elif df.shape[1] == 1:
return df.index
def flt_irr(df, irr_col, low, high, ref_val=None):
"""
Top level filter on irradiance values.
Parameters
----------
df : DataFrame
Dataframe to be filtered.
irr_col : str
String that is the name of the column with the irradiance data.
low : float or int
Minimum value as fraction (0.8) or absolute 200 (W/m^2)
high : float or int
Max value as fraction (1.2) or absolute 800 (W/m^2)
ref_val : float or int
Must provide arg when min/max are fractions
Returns
-------
DataFrame
"""
if ref_val is not None:
low *= ref_val
high *= ref_val
df_renamed = df.rename(columns={irr_col: 'poa'})
flt_str = '@low <= ' + 'poa' + ' <= @high'
indx = df_renamed.query(flt_str).index
return df.loc[indx, :]
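# Sketch of typical calls; the column name 'POA' is an assumption for
# illustration, not a name used by the module:
#     flt_irr(df, 'POA', 0.8, 1.2, ref_val=600)   # keeps roughly 480-720 W/m^2
#     flt_irr(df, 'POA', 400, 800)                # absolute bounds in W/m^2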
def filter_grps(grps, rcs, irr_col, low, high, **kwargs):
"""
    Apply irradiance filter around passed reporting irradiances to groupby.
For each group in the grps argument the irradiance is filtered by a
percentage around the reporting irradiance provided in rcs.
Parameters
----------
grps : pandas groupby
Groupby object with time groups (months, seasons, etc.).
rcs : pandas DataFrame
Dataframe of reporting conditions. Use the rep_cond method to generate
a dataframe for this argument.
**kwargs
Passed to pandas Grouper to control label and closed side of intervals.
        See pandas Grouper documentation for details. Default is left labeled
and left closed.
Returns
-------
pandas groupby
"""
flt_dfs = []
freq = list(grps.groups.keys())[0].freq
for grp_name, grp_df in grps:
ref_val = rcs.loc[grp_name, 'poa']
grp_df_flt = flt_irr(grp_df, irr_col, low, high, ref_val=ref_val)
flt_dfs.append(grp_df_flt)
df_flt = pd.concat(flt_dfs)
df_flt_grpby = df_flt.groupby(pd.Grouper(freq=freq, **kwargs))
return df_flt_grpby
def irrRC_balanced(df, low, high, irr_col='GlobInc', plot=False):
"""
Iteratively calculates reporting irradiance that achieves 40/60 balance.
    This function is intended to implement a strict interpretation of common
contract language that specifies the reporting irradiance be determined by
finding the irradiance that results in a balance of points within a
+/- percent range of the reporting irradiance. This function
iterates to a solution for the reporting irradiance by calculating the
    irradiance that has 10 datapoints in the filtered dataset above it, then
filtering for a percentage of points around that irradiance, calculating
what percentile the reporting irradiance is in. This procedure continues
until 40% of the points in the filtered dataset are above the calculated
reporting irradiance.
Parameters
----------
df: pandas DataFrame
DataFrame containing irradiance data for calculating the irradiance
reporting condition.
low: float
Bottom value for irradiance filter, usually between 0.5 and 0.8.
high: float
Top value for irradiance filter, usually between 1.2 and 1.5.
irr_col: str
String that is the name of the column with the irradiance data.
plot: bool, default False
        Plots graphical view of the algorithm searching for reporting irradiance.
Useful for troubleshooting or understanding the method.
Returns
-------
Tuple
Float reporting irradiance and filtered dataframe.
"""
if plot:
irr = df[irr_col].values
x = np.ones(irr.shape[0])
plt.plot(x, irr, 'o', markerfacecolor=(0.5, 0.7, 0.5, 0.1))
plt.ylabel('irr')
x_inc = 1.01
vals_above = 10
perc = 100.
pt_qty = 0
loop_cnt = 0
pt_qty_array = []
# print('--------------- MONTH START --------------')
while perc > 0.6 or pt_qty < 50:
# print('####### LOOP START #######')
df_count = df.shape[0]
df_perc = 1 - (vals_above / df_count)
# print('in percent: {}'.format(df_perc))
irr_RC = (df[irr_col].agg(perc_wrap(df_perc * 100)))
# print('ref irr: {}'.format(irr_RC))
flt_df = flt_irr(df, irr_col, low, high, ref_val=irr_RC)
# print('number of vals: {}'.format(df.shape))
pt_qty = flt_df.shape[0]
# print('flt pt qty: {}'.format(pt_qty))
perc = stats.percentileofscore(flt_df[irr_col], irr_RC) / 100
# print('out percent: {}'.format(perc))
vals_above += 1
pt_qty_array.append(pt_qty)
if perc <= 0.6 and pt_qty <= pt_qty_array[loop_cnt - 1]:
break
loop_cnt += 1
if plot:
x_inc += 0.02
y1 = irr_RC * low
y2 = irr_RC * high
plt.plot(x_inc, irr_RC, 'ro')
plt.plot([x_inc, x_inc], [y1, y2])
if plot:
plt.show()
return(irr_RC, flt_df)
def fit_model(df, fml='power ~ poa + I(poa * poa) + I(poa * t_amb) + I(poa * w_vel) - 1'):
"""
Fits linear regression using statsmodels to dataframe passed.
Dataframe must be first argument for use with pandas groupby object
apply method.
Parameters
----------
df : pandas dataframe
fml : str
Formula to fit refer to statsmodels and patsy documentation for format.
Default is the formula in ASTM E2848.
Returns
-------
Statsmodels linear model regression results wrapper object.
"""
mod = smf.ols(formula=fml, data=df)
reg = mod.fit()
return reg
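# Sketch of fitting the default ASTM E2848 formula; assumes df_reg already has
# columns named power, poa, t_amb, and w_vel (e.g. from CapData.get_reg_cols):
#     reg = fit_model(df_reg)
#     reg.params    # regression coefficients
#     reg.pvalues   # p-values for each coefficient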
def predict(regs, rcs):
"""
Calculates predicted values for given linear models and predictor values.
Evaluates the first linear model in the iterable with the first row of the
predictor values in the dataframe. Passed arguments must be aligned.
Parameters
----------
regs : iterable of statsmodels regression results wrappers
rcs : pandas dataframe
Dataframe of predictor values used to evaluate each linear model.
        The column names must match the strings used in the regression formula.
Returns
-------
Pandas series of predicted values.
"""
pred_cap = pd.Series()
for i, mod in enumerate(regs):
RC_df = pd.DataFrame(rcs.iloc[i, :]).T
pred_cap = pred_cap.append(mod.predict(RC_df))
return pred_cap
def pred_summary(grps, rcs, allowance, **kwargs):
"""
    Creates summary table of reporting conditions, pred cap, and guaranteed cap.
This method does not calculate reporting conditions.
Parameters
----------
grps : pandas groupby object
Solar data grouped by season or month used to calculate reporting
conditions. This argument is used to fit models for each group.
rcs : pandas dataframe
Dataframe of reporting conditions used to predict capacities.
allowance : float
        Percent allowance to calculate guaranteed capacity from predicted capacity.
Returns
-------
Dataframe of reporting conditions, model coefficients, predicted capacities
    guaranteed capacities, and points in each grouping.
"""
regs = grps.apply(fit_model, **kwargs)
predictions = predict(regs, rcs)
params = regs.apply(lambda x: x.params.transpose())
pt_qty = grps.agg('count').iloc[:, 0]
predictions.index = pt_qty.index
params.index = pt_qty.index
rcs.index = pt_qty.index
predictions.name = 'PredCap'
for rc_col_name in rcs.columns:
for param_col_name in params.columns:
if rc_col_name == param_col_name:
params.rename(columns={param_col_name: param_col_name + '-param'},
inplace=True)
results = pd.concat([rcs, predictions, params], axis=1)
results['guaranteedCap'] = results['PredCap'] * (1 - allowance)
results['pt_qty'] = pt_qty.values
return results
def pvlib_location(loc):
"""
Creates a pvlib location object.
Parameters
----------
loc : dict
Dictionary of values required to instantiate a pvlib Location object.
loc = {'latitude': float,
'longitude': float,
'altitude': float/int,
'tz': str, int, float, or pytz.timezone, default 'UTC'}
See
http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
for a list of valid time zones.
pytz.timezone objects will be converted to strings.
ints and floats must be in hours from UTC.
Returns
-------
pvlib location object.
"""
return Location(**loc)
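# For example (coordinates are placeholders, not project data):
#     loc = {'latitude': 30.274583, 'longitude': -97.740352,
#            'altitude': 500, 'tz': 'America/Chicago'}
#     location = pvlib_location(loc)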
def pvlib_system(sys):
"""
Creates a pvlib PVSystem or SingleAxisTracker object.
A SingleAxisTracker object is created if any of the keyword arguments for
initiating a SingleAxisTracker object are found in the keys of the passed
dictionary.
Parameters
----------
sys : dict
Dictionary of keywords required to create a pvlib SingleAxisTracker
or PVSystem.
Example dictionaries:
fixed_sys = {'surface_tilt': 20,
'surface_azimuth': 180,
'albedo': 0.2}
tracker_sys1 = {'axis_tilt': 0, 'axis_azimuth': 0,
'max_angle': 90, 'backtrack': True,
'gcr': 0.2, 'albedo': 0.2}
Refer to pvlib documentation for details.
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.pvsystem.PVSystem.html
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.tracking.SingleAxisTracker.html
Returns
-------
pvlib PVSystem or SingleAxisTracker object.
"""
sandia_modules = retrieve_sam('SandiaMod')
cec_inverters = retrieve_sam('cecinverter')
sandia_module = sandia_modules['Canadian_Solar_CS5P_220M___2009_']
cec_inverter = cec_inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_']
trck_kwords = ['axis_tilt', 'axis_azimuth', 'max_angle', 'backtrack', 'gcr']
if any(kword in sys.keys() for kword in trck_kwords):
system = SingleAxisTracker(**sys,
module_parameters=sandia_module,
inverter_parameters=cec_inverter)
else:
system = PVSystem(**sys,
module_parameters=sandia_module,
inverter_parameters=cec_inverter)
return system
def get_tz_index(time_source, loc):
"""
Creates DatetimeIndex with timezone aligned with location dictionary.
    Handles generating a DatetimeIndex with a timezone for use as an argument
to pvlib ModelChain prepare_inputs method or pvlib Location get_clearsky
method.
Parameters
----------
time_source : dataframe or DatetimeIndex
If passing a dataframe the index of the dataframe will be used. If the
index does not have a timezone the timezone will be set using the
timezone in the passed loc dictionary.
If passing a DatetimeIndex with a timezone it will be returned directly.
If passing a DatetimeIndex without a timezone the timezone in the
timezone dictionary will be used.
Returns
-------
DatetimeIndex with timezone
"""
if isinstance(time_source, pd.core.indexes.datetimes.DatetimeIndex):
if time_source.tz is None:
time_source = time_source.tz_localize(loc['tz'], ambiguous='infer',
errors='coerce')
return time_source
else:
if pytz.timezone(loc['tz']) != time_source.tz:
warnings.warn('Passed a DatetimeIndex with a timezone that '
'does not match the timezone in the loc dict. '
'Using the timezone of the DatetimeIndex.')
return time_source
elif isinstance(time_source, pd.core.frame.DataFrame):
if time_source.index.tz is None:
return time_source.index.tz_localize(loc['tz'], ambiguous='infer',
errors='coerce')
else:
if pytz.timezone(loc['tz']) != time_source.index.tz:
warnings.warn('Passed a DataFrame with a timezone that '
'does not match the timezone in the loc dict. '
'Using the timezone of the DataFrame.')
return time_source.index
def csky(time_source, loc=None, sys=None, concat=True, output='both'):
"""
Calculate clear sky poa and ghi.
Parameters
----------
time_source : dataframe or DatetimeIndex
If passing a dataframe the index of the dataframe will be used. If the
index does not have a timezone the timezone will be set using the
timezone in the passed loc dictionary.
If passing a DatetimeIndex with a timezone it will be returned directly.
If passing a DatetimeIndex without a timezone the timezone in the
timezone dictionary will be used.
loc : dict
Dictionary of values required to instantiate a pvlib Location object.
loc = {'latitude': float,
'longitude': float,
'altitude': float/int,
'tz': str, int, float, or pytz.timezone, default 'UTC'}
See
http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
for a list of valid time zones.
pytz.timezone objects will be converted to strings.
ints and floats must be in hours from UTC.
sys : dict
Dictionary of keywords required to create a pvlib SingleAxisTracker
or PVSystem.
Example dictionaries:
fixed_sys = {'surface_tilt': 20,
'surface_azimuth': 180,
'albedo': 0.2}
tracker_sys1 = {'axis_tilt': 0, 'axis_azimuth': 0,
'max_angle': 90, 'backtrack': True,
'gcr': 0.2, 'albedo': 0.2}
Refer to pvlib documentation for details.
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.pvsystem.PVSystem.html
https://pvlib-python.readthedocs.io/en/latest/generated/pvlib.tracking.SingleAxisTracker.html
concat : bool, default True
If concat is True then returns columns as defined by return argument
added to passed dataframe, otherwise returns just clear sky data.
output : str, default 'both'
both - returns only total poa and ghi
poa_all - returns all components of poa
ghi_all - returns all components of ghi
all - returns all components of poa and ghi
"""
location = pvlib_location(loc)
system = pvlib_system(sys)
mc = ModelChain(system, location)
times = get_tz_index(time_source, loc)
if output == 'both':
ghi = location.get_clearsky(times=times)
mc.prepare_inputs(times=times)
csky_df = pd.DataFrame({'poa_mod_csky': mc.total_irrad['poa_global'],
'ghi_mod_csky': ghi['ghi']})
if output == 'poa_all':
mc.prepare_inputs(times=times)
csky_df = mc.total_irrad
if output == 'ghi_all':
csky_df = location.get_clearsky(times=times)
if output == 'all':
ghi = location.get_clearsky(times=times)
mc.prepare_inputs(times=times)
csky_df = pd.concat([mc.total_irrad, ghi], axis=1)
ix_no_tz = csky_df.index.tz_localize(None, ambiguous='infer',
errors='coerce')
csky_df.index = ix_no_tz
if concat:
if isinstance(time_source, pd.core.frame.DataFrame):
df_with_csky = pd.concat([time_source, csky_df], axis=1)
return df_with_csky
else:
            warnings.warn('time_source is not a dataframe; only clear sky '
                          'data returned')
return csky_df
else:
return csky_df
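# Sketch of adding clear sky columns to measured data; the loc and sys values
# and the meas_df name are placeholders for illustration:
#     loc = {'latitude': 39.742, 'longitude': -105.18,
#            'altitude': 1829, 'tz': 'US/Mountain'}
#     sys = {'surface_tilt': 20, 'surface_azimuth': 180, 'albedo': 0.2}
#     df_with_csky = csky(meas_df, loc=loc, sys=sys, concat=True, output='both')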
def get_summary(*args):
summaries = [cd.get_summary() for cd in args]
return pd.concat(summaries)
def pick_attr(sim, das, name):
sim_attr = getattr(sim, name)
das_attr = getattr(das, name)
if sim_attr is None and das_attr is None:
warn_str = '{} must be set for either sim or das'.format(name)
return warnings.warn(warn_str)
elif sim_attr is None and das_attr is not None:
return (das_attr, 'das')
elif sim_attr is not None and das_attr is None:
return (sim_attr, 'sim')
elif sim_attr is not None and das_attr is not None:
        warn_str = ('{} found for sim and das; set {} to None for one of '
                    'the two'.format(name, name))
return warnings.warn(warn_str)
def determine_pass_or_fail(cap_ratio, tolerance, nameplate):
"""
Determine a pass/fail result from a capacity ratio and test tolerance.
Parameters
----------
cap_ratio : float
Ratio of the measured data regression result to the simulated data
regression result.
tolerance : str
String representing error band. Ex. '+/- 3' or '- 5'
There must be space between the sign and number. Number is
interpreted as a percent. For example, 5 percent is 5 not 0.05.
nameplate : numeric
Nameplate rating of the PV plant.
Returns
-------
tuple of boolean and string
True for a passing test and false for a failing test.
Limits for passing and failing test.
"""
sign = tolerance.split(sep=' ')[0]
error = int(tolerance.split(sep=' ')[1]) / 100
nameplate_plus_error = nameplate * (1 + error)
nameplate_minus_error = nameplate * (1 - error)
if sign == '+/-' or sign == '-/+':
return (round(np.abs(1 - cap_ratio), ndigits=6) <= error,
str(nameplate_minus_error) + ', ' + str(nameplate_plus_error))
elif sign == '-':
return (cap_ratio >= 1 - error,
str(nameplate_minus_error) + ', None')
else:
warnings.warn("Sign must be '-', '+/-', or '-/+'.")
def cp_results(sim, das, nameplate, tolerance, check_pvalues=False, pval=0.05,
print_res=True):
"""
Prints a summary indicating if system passed or failed capacity test.
NOTE: Method will try to adjust for 1000x differences in units.
Parameters
----------
sim : CapData
CapData object for simulated data.
das : CapData
CapData object for measured data.
nameplate : numeric
Nameplate rating of the PV plant.
tolerance : str
        String representing error band. Ex. '+/- 3', '- 5'
There must be space between the sign and number. Number is
interpreted as a percent. For example, 5 percent is 5 not 0.05.
check_pvalues : boolean, default False
Set to true to check p values for each coefficient. If p values is
greater than pval, then the coefficient is set to zero.
pval : float, default 0.05
p value to use as cutoff. Regresion coefficients with a p value
greater than pval will be set to zero.
print_res : boolean, default True
Set to False to prevent printing results.
Returns
-------
Capacity test ratio - the capacity calculated from the reporting conditions
and the measured data divided by the capacity calculated from the reporting
conditions and the simulated data.
"""
sim_int = sim.copy()
das_int = das.copy()
if sim_int.reg_fml != das_int.reg_fml:
return warnings.warn('CapData objects do not have the same'
'regression formula.')
if check_pvalues:
for cd in [sim_int, das_int]:
for key, val in cd.ols_model.pvalues.iteritems():
if val > pval:
cd.ols_model.params[key] = 0
rc = pick_attr(sim_int, das_int, 'rc')
if print_res:
print('Using reporting conditions from {}. \n'.format(rc[1]))
rc = rc[0]
actual = das_int.ols_model.predict(rc)[0]
expected = sim_int.ols_model.predict(rc)[0]
cap_ratio = actual / expected
if cap_ratio < 0.01:
cap_ratio *= 1000
actual *= 1000
warnings.warn('Capacity ratio and actual capacity multiplied by 1000'
' because the capacity ratio was less than 0.01.')
capacity = nameplate * cap_ratio
if print_res:
test_passed = determine_pass_or_fail(cap_ratio, tolerance, nameplate)
print_results(test_passed, expected, actual, cap_ratio, capacity,
test_passed[1])
return(cap_ratio)
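# Typical call (assumes sim and das are CapData objects that already have
# fitted ols_model attributes and reporting conditions set):
#     cap_ratio = cp_results(sim, das, nameplate=20000, tolerance='+/- 5',
#                            check_pvalues=True, pval=0.05)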
def print_results(test_passed, expected, actual, cap_ratio, capacity, bounds):
"""
Print formatted results of capacity test.
"""
if test_passed[0]:
print("{:<30s}{}".format("Capacity Test Result:", "PASS"))
else:
print("{:<25s}{}".format("Capacity Test Result:", "FAIL"))
print("{:<30s}{:0.3f}".format("Modeled test output:",
expected) + "\n" +
"{:<30s}{:0.3f}".format("Actual test output:",
actual) + "\n" +
"{:<30s}{:0.3f}".format("Tested output ratio:",
cap_ratio) + "\n" +
"{:<30s}{:0.3f}".format("Tested Capacity:",
capacity)
)
print("{:<30s}{}\n\n".format("Bounds:", test_passed[1]))
def highlight_pvals(s):
"""
Highlight vals greater than or equal to 0.05 in a Series yellow.
"""
is_greaterthan = s >= 0.05
return ['background-color: yellow' if v else '' for v in is_greaterthan]
def res_summary(sim, das, nameplate, tolerance, print_res=False, **kwargs):
"""
Prints a summary of the capacity test results.
Capacity ratio is the capacity calculated from the reporting conditions
and the measured data divided by the capacity calculated from the reporting
conditions and the simulated data.
The tolerance is applied to the capacity test ratio to determine if the
test passes or fails.
Parameters
----------
sim : CapData
CapData object for simulated data.
das : CapData
CapData object for measured data.
nameplate : numeric
Nameplate rating of the PV plant.
tolerance : str
String representing error band. Ex. '+ 3', '+/- 3', '- 5'
There must be space between the sign and number. Number is
interpreted as a percent. For example, 5 percent is 5 not 0.05.
print_res : boolean, default True
Set to False to prevent printing results.
**kwargs
kwargs are passed to cp_results. See documentation for cp_results for
options. check_pvalues is set in this method, so do not pass again.
Prints:
Capacity ratio without setting parameters with high p-values to zero.
    Capacity ratio after setting parameters with high p-values to zero.
P-values for simulated and measured regression coefficients.
Regression coefficients (parameters) for simulated and measured data.
"""
das_pvals = das.ols_model.pvalues
sim_pvals = sim.ols_model.pvalues
das_params = das.ols_model.params
sim_params = sim.ols_model.params
df_pvals = pd.DataFrame([das_pvals, sim_pvals, das_params, sim_params])
df_pvals = df_pvals.transpose()
df_pvals.rename(columns={0: 'das_pvals', 1: 'sim_pvals',
2: 'das_params', 3: 'sim_params'}, inplace=True)
cprat = cp_results(sim, das, nameplate, tolerance,
print_res=print_res, check_pvalues=False, **kwargs)
cprat_cpval = cp_results(sim, das, nameplate, tolerance,
print_res=print_res, check_pvalues=True, **kwargs)
cprat_rounded = np.round(cprat, decimals=4) * 100
cprat_cpval_rounded = np.round(cprat_cpval, decimals=4) * 100
print('{:.3f}% - Cap Ratio'.format(cprat_rounded))
print('{:.3f}% - Cap Ratio after pval check'.format(cprat_cpval_rounded))
return(df_pvals.style.format('{:20,.5f}').apply(highlight_pvals,
subset=['das_pvals',
'sim_pvals']))
class CapData(object):
"""
Class to store capacity test data and translation of column names.
CapData objects store a pandas dataframe of measured or simulated data
and a translation dictionary used to translate and group the raw column
names provided in the data.
The translation dictionary allows maintaining the column names in the raw
data while also grouping measurements of the same type from different
sensors.
Parameters
----------
name : str
Name for the CapData object.
df : pandas dataframe
Used to store measured or simulated data imported from csv.
df_flt : pandas dataframe
Holds filtered data. Filtering methods act on and write to this
attribute.
trans : dictionary
        A dictionary with keys that are algorithmically determined based on
the data of each imported column in the dataframe and values that are
the column labels in the raw data.
trans_keys : list
Simply a list of the translation dictionary (trans) keys.
reg_trans : dictionary
Dictionary that is manually set to link abbreviations for
for the independent variables of the ASTM Capacity test regression
equation to the translation dictionary keys.
trans_abrev : dictionary
Enumerated translation dict keys mapped to original column names.
Enumerated translation dict keys are used in plot hover tooltip.
col_colors : dictionary
Original column names mapped to a color for use in plot function.
summary_ix : list of tuples
Holds the row index data modified by the update_summary decorator
function.
summary : list of dicts
        Holds the data modified by the update_summary decorator function.
rc : DataFrame
Dataframe for the reporting conditions (poa, t_amb, and w_vel).
ols_model : statsmodels linear regression model
Holds the linear regression model object.
reg_fml : str
Regression formula to be fit to measured and simulated data. Must
follow the requirements of statsmodels use of patsy.
tolerance : str
String representing error band. Ex. '+ 3', '+/- 3', '- 5'
There must be space between the sign and number. Number is
interpreted as a percent. For example, 5 percent is 5 not 0.05.
"""
def __init__(self, name):
super(CapData, self).__init__()
self.name = name
self.df = pd.DataFrame()
self.df_flt = None
self.trans = {}
self.trans_keys = []
self.reg_trans = {}
self.trans_abrev = {}
self.col_colors = {}
self.summary_ix = []
self.summary = []
self.rc = None
self.ols_model = None
self.reg_fml = 'power ~ poa + I(poa * poa) + I(poa * t_amb) + I(poa * w_vel) - 1'
self.tolerance = None
self.pre_agg_cols = None
self.pre_agg_trans = None
self.pre_agg_reg_trans = None
def set_reg_trans(self, power='', poa='', t_amb='', w_vel=''):
"""
Create a dictionary linking the regression variables to data.
Links the independent regression variables to the appropriate
translation keys or a column name may be used to specify a
single column of data.
Sets attribute and returns nothing.
Parameters
----------
power : str
Translation key for the power variable.
poa : str
Translation key for the plane of array (poa) irradiance variable.
t_amb : str
Translation key for the ambient temperature variable.
w_vel : str
Translation key for the wind velocity key.
"""
self.reg_trans = {'power': power,
'poa': poa,
't_amb': t_amb,
'w_vel': w_vel}
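    # Example call (the key strings below are illustrative; actual keys depend
    # on the translation dictionary generated from the loaded column names):
    #     meas.set_reg_trans(power='-mtr-', poa='irr-poa-ref_cell',
    #                        t_amb='temp-amb-', w_vel='wind--')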
def copy(self):
"""Creates and returns a copy of self."""
cd_c = CapData('')
cd_c.name = copy.copy(self.name)
cd_c.df = self.df.copy()
cd_c.df_flt = self.df_flt.copy()
cd_c.trans = copy.copy(self.trans)
cd_c.trans_keys = copy.copy(self.trans_keys)
cd_c.reg_trans = copy.copy(self.reg_trans)
cd_c.trans_abrev = copy.copy(self.trans_abrev)
cd_c.col_colors = copy.copy(self.col_colors)
cd_c.summary_ix = copy.copy(self.summary_ix)
cd_c.summary = copy.copy(self.summary)
cd_c.rc = copy.copy(self.rc)
cd_c.ols_model = copy.deepcopy(self.ols_model)
cd_c.reg_fml = copy.copy(self.reg_fml)
return cd_c
def empty(self):
"""Returns a boolean indicating if the CapData object contains data."""
if self.df.empty and len(self.trans_keys) == 0 and len(self.trans) == 0:
return True
else:
return False
def load_das(self, path, filename, source=None, **kwargs):
"""
Reads measured solar data from a csv file.
Utilizes pandas read_csv to import measure solar data from a csv file.
Attempts a few diferent encodings, trys to determine the header end
by looking for a date in the first column, and concantenates column
headings to a single string.
Parameters
----------
path : str
Path to file to import.
filename : str
Name of file to import.
**kwargs
Use to pass additional kwargs to pandas read_csv.
Returns
-------
pandas dataframe
"""
data = os.path.normpath(path + filename)
encodings = ['utf-8', 'latin1', 'iso-8859-1', 'cp1252']
for encoding in encodings:
try:
all_data = pd.read_csv(data, encoding=encoding, index_col=0,
parse_dates=True, skip_blank_lines=True,
low_memory=False, **kwargs)
except UnicodeDecodeError:
continue
else:
break
if not isinstance(all_data.index[0], pd.Timestamp):
for i, indice in enumerate(all_data.index):
try:
isinstance(dateutil.parser.parse(str(all_data.index[i])),
datetime.date)
header_end = i + 1
break
except ValueError:
continue
if source == 'AlsoEnergy':
header = 'infer'
else:
header = list(np.arange(header_end))
for encoding in encodings:
try:
all_data = pd.read_csv(data, encoding=encoding,
header=header, index_col=0,
parse_dates=True, skip_blank_lines=True,
low_memory=False, **kwargs)
except UnicodeDecodeError:
continue
else:
break
if source == 'AlsoEnergy':
row0 = all_data.iloc[0, :]
row1 = all_data.iloc[1, :]
row2 = all_data.iloc[2, :]
row0_noparen = []
for val in row0:
if type(val) is str:
row0_noparen.append(val.split('(')[0].strip())
else:
row0_noparen.append(val)
row1_nocomm = []
for val in row1:
if type(val) is str:
strings = val.split(',')
if len(strings) == 1:
row1_nocomm.append(val)
else:
row1_nocomm.append(strings[-1].strip())
else:
row1_nocomm.append(val)
row2_noNan = []
for val in row2:
                if pd.isna(val):
row2_noNan.append('')
else:
row2_noNan.append(val)
new_cols = []
for one, two, three in zip(row0_noparen, row1_nocomm, row2_noNan):
new_cols.append(str(one) + ' ' + str(two) + ', ' + str(three))
all_data.columns = new_cols
all_data = all_data.apply(pd.to_numeric, errors='coerce')
all_data.dropna(axis=1, how='all', inplace=True)
all_data.dropna(how='all', inplace=True)
        if source != 'AlsoEnergy':
all_data.columns = [' '.join(col).strip() for col in all_data.columns.values]
else:
all_data.index = pd.to_datetime(all_data.index)
return all_data
def load_pvsyst(self, path, filename, **kwargs):
"""
Load data from a PVsyst energy production model.
Parameters
----------
path : str
Path to file to import.
filename : str
Name of file to import.
**kwargs
Use to pass additional kwargs to pandas read_csv.
Returns
-------
pandas dataframe
"""
dirName = os.path.normpath(path + filename)
encodings = ['utf-8', 'latin1', 'iso-8859-1', 'cp1252']
for encoding in encodings:
try:
# pvraw = pd.read_csv(dirName, skiprows=10, encoding=encoding,
# header=[0, 1], parse_dates=[0],
# infer_datetime_format=True, **kwargs)
pvraw = pd.read_csv(dirName, skiprows=10, encoding=encoding,
header=[0, 1], **kwargs)
except UnicodeDecodeError:
continue
else:
break
pvraw.columns = pvraw.columns.droplevel(1)
dates = pvraw.loc[:, 'date']
try:
dt_index = pd.to_datetime(dates, format='%m/%d/%y %H:%M')
except ValueError:
dt_index = pd.to_datetime(dates)
pvraw.index = dt_index
pvraw.drop('date', axis=1, inplace=True)
pvraw = pvraw.rename(columns={"T Amb": "TAmb"})
return pvraw
def load_data(self, path='./data/', fname=None, set_trans=True,
trans_report=True, source=None, load_pvsyst=False,
clear_sky=False, loc=None, sys=None, **kwargs):
"""
Import data from csv files.
The intent of the default behavior is to combine csv files that have
the same columns and rows of data from different times. For example,
combining daily files of 5 minute measurements from the same sensors
for each day.
Use the path and fname arguments to specify a single file to import.
Parameters
----------
path : str, default './data/'
Path to directory containing csv files to load.
fname: str, default None
Filename of specific file to load. If filename is none method will
load all csv files into one dataframe.
set_trans : bool, default True
            Generates translation dictionary for column names after loading
data.
trans_report : bool, default True
If set_trans is true, then method prints summary of translation
dictionary process including any possible data issues. No effect
on method when set to False.
source : str, default None
Default of None uses general approach that concatenates header data.
Set to 'AlsoEnergy' to use column heading parsing specific to
downloads from AlsoEnergy.
load_pvsyst : bool, default False
By default skips any csv file that has 'pvsyst' in the name. Is
not case sensitive. Set to true to import a csv with 'pvsyst' in
the name and skip all other files.
clear_sky : bool, default False
Set to true and provide loc and sys arguments to add columns of
clear sky modeled poa and ghi to loaded data.
loc : dict
See the csky function for details on dictionary options.
sys : dict
See the csky function for details on dictionary options.
**kwargs
Will pass kwargs onto load_pvsyst or load_das, which will pass to
Pandas.read_csv. Useful to adjust the separator (Ex. sep=';').
Returns
-------
None
"""
if fname is None:
files_to_read = []
for file in os.listdir(path):
if file.endswith('.csv'):
files_to_read.append(file)
elif file.endswith('.CSV'):
files_to_read.append(file)
all_sensors = pd.DataFrame()
if not load_pvsyst:
for filename in files_to_read:
if filename.lower().find('pvsyst') != -1:
print("Skipped file: " + filename)
continue
nextData = self.load_das(path, filename, source=source,
**kwargs)
all_sensors = pd.concat([all_sensors, nextData], axis=0)
print("Read: " + filename)
elif load_pvsyst:
for filename in files_to_read:
if filename.lower().find('pvsyst') == -1:
print("Skipped file: " + filename)
continue
nextData = self.load_pvsyst(path, filename, **kwargs)
all_sensors = pd.concat([all_sensors, nextData], axis=0)
print("Read: " + filename)
else:
if not load_pvsyst:
all_sensors = self.load_das(path, fname, source=source, **kwargs)
elif load_pvsyst:
all_sensors = self.load_pvsyst(path, fname, **kwargs)
ix_ser = all_sensors.index.to_series()
all_sensors['index'] = ix_ser.apply(lambda x: x.strftime('%m/%d/%Y %H %M'))
self.df = all_sensors
if not load_pvsyst:
if clear_sky:
if loc is None:
                    warnings.warn('Must provide loc and sys dictionary '
                                  'when clear_sky is True. Loc dict missing.')
                if sys is None:
                    warnings.warn('Must provide loc and sys dictionary '
                                  'when clear_sky is True. Sys dict missing.')
self.df = csky(self.df, loc=loc, sys=sys, concat=True,
output='both')
if set_trans:
self.set_translation(trans_report=trans_report)
self.df_flt = self.df.copy()
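    # Typical usage (the file layout and names are assumptions for
    # illustration):
    #     das = CapData('das')
    #     das.load_data(path='./data/', fname='meter_and_weather.csv')
    #     sim = CapData('sim')
    #     sim.load_data(path='./data/', load_pvsyst=True)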
def __series_type(self, series, type_defs, bounds_check=True,
warnings=False):
"""
Assigns columns to a category by analyzing the column names.
The type_defs parameter is a dictionary which defines search strings
and value limits for each key, where the key is a categorical name
and the search strings are possible related names. For example an
irradiance sensor has the key 'irr' with search strings 'irradiance'
'plane of array', 'poa', etc.
Parameters
----------
series : pandas series
Pandas series, row or column of dataframe passed by pandas.df.apply.
type_defs : dictionary
Dictionary with the following structure. See type_defs
{'category abbreviation': [[category search strings],
(min val, max val)]}
bounds_check : bool, default True
When true checks series values against min and max values in the
type_defs dictionary.
warnings : bool, default False
When true prints warning that values in series are outside expected
range and adds '-valuesError' to returned str.
Returns
-------
string
Returns a string representing the category for the series.
Concatenates '-valuesError' if bounds_check and warnings are both
True and values within the series are outside the expected range.
"""
for key in type_defs.keys():
# print('################')
# print(key)
for search_str in type_defs[key][0]:
# print(search_str)
if series.name.lower().find(search_str.lower()) == -1:
continue
else:
if bounds_check:
type_min = type_defs[key][1][0]
type_max = type_defs[key][1][1]
ser_min = series.min()
ser_max = series.max()
min_bool = ser_min >= type_min
max_bool = ser_max <= type_max
if min_bool and max_bool:
return key
else:
if warnings:
if not min_bool and not max_bool:
print('{} in {} is below {} for '
'{}'.format(ser_min, series.name,
type_min, key))
print('{} in {} is above {} for '
'{}'.format(ser_max, series.name,
type_max, key))
elif not min_bool:
print('{} in {} is below {} for '
'{}'.format(ser_min, series.name,
type_min, key))
elif not max_bool:
print('{} in {} is above {} for '
'{}'.format(ser_max, series.name,
type_max, key))
return key
else:
return key
return ''
def set_plot_attributes(self):
dframe = self.df
for key in self.trans_keys:
df = dframe[self.trans[key]]
cols = df.columns.tolist()
for i, col in enumerate(cols):
abbrev_col_name = key + str(i)
self.trans_abrev[abbrev_col_name] = col
col_key0 = key.split('-')[0]
col_key1 = key.split('-')[1]
if col_key0 in ('irr', 'temp'):
col_key = col_key0 + '-' + col_key1
else:
col_key = col_key0
try:
j = i % 4
self.col_colors[col] = plot_colors_brewer[col_key][j]
except KeyError:
j = i % 10
self.col_colors[col] = Category10[10][j]
def set_translation(self, trans_report=True):
"""
Creates a dict of raw column names paired to categorical column names.
Uses multiple type_def formatted dictionaries to determine the type,
sub-type, and equipment type for data series of a dataframe. The determined
types are concatenated to a string used as a dictionary key with a list
        of one or more original column names as the paired value.
Parameters
----------
trans_report : bool, default True
Sets the warnings option of __series_type when applied to determine
the column types.
Returns
-------
None
Sets attributes self.trans and self.trans_keys
Todo
----
type_defs parameter
Consider refactoring to have a list of type_def dictionaries as an
input and loop over each dict in the list.
"""
col_types = self.df.apply(self.__series_type, args=(type_defs,),
warnings=trans_report).tolist()
sub_types = self.df.apply(self.__series_type, args=(sub_type_defs,),
bounds_check=False).tolist()
irr_types = self.df.apply(self.__series_type, args=(irr_sensors_defs,),
bounds_check=False).tolist()
col_indices = []
for typ, sub_typ, irr_typ in zip(col_types, sub_types, irr_types):
col_indices.append('-'.join([typ, sub_typ, irr_typ]))
names = []
for new_name, old_name in zip(col_indices, self.df.columns.tolist()):
names.append((new_name, old_name))
names.sort()
orig_names_sorted = [name_pair[1] for name_pair in names]
trans = {}
col_indices.sort()
cols = list(set(col_indices))
cols.sort()
for name in set(cols):
start = col_indices.index(name)
count = col_indices.count(name)
trans[name] = orig_names_sorted[start:start + count]
self.trans = trans
trans_keys = list(self.trans.keys())
if 'index--' in trans_keys:
trans_keys.remove('index--')
trans_keys.sort()
self.trans_keys = trans_keys
self.set_plot_attributes()
def drop_cols(self, columns):
"""
Drops columns from CapData dataframe and translation dictionary.
Parameters
----------
        columns : list
List of columns to drop.
Todo
----
Change to accept a string column name or list of strings
"""
for key, value in self.trans.items():
for col in columns:
try:
value.remove(col)
self.trans[key] = value
except ValueError:
continue
self.df.drop(columns, axis=1, inplace=True)
self.df_flt.drop(columns, axis=1, inplace=True)
def get_reg_cols(self, reg_vars=['power', 'poa', 't_amb', 'w_vel'],
filtered_data=True):
"""
Get and rename the regression columns.
Parameters
----------
reg_vars : list
Default is all of 'power', 'poa', 't_amb', 'w_vel'. Any
combination of the four is valid.
filtered_data : bool, default true
            Return filtered or unfiltered data.
Returns
-------
DataFrame
Todo
----
Pass list of reg coeffs to rename default all of them.
"""
for reg_var in reg_vars:
if self.reg_trans[reg_var] in self.df_flt.columns:
continue
else:
columns = self.trans[self.reg_trans[reg_var]]
if len(columns) != 1:
return warnings.warn('Multiple columns per translation '
'dictionary group. Run agg_sensors '
'before this method.')
df = self.rview(reg_vars, filtered_data=filtered_data).copy()
rename = {old: new for old, new in zip(df.columns, reg_vars)}
df.rename(columns=rename, inplace=True)
return df
def view(self, tkey, filtered_data=False):
"""
        Convenience function returns columns using translation dictionary names.
Parameters
----------
tkey: int or str or list of int or strs
            String or list of strings from self.trans_keys or int position or
            list of int positions of value in self.trans_keys.
"""
if isinstance(tkey, int):
keys = self.trans[self.trans_keys[tkey]]
elif isinstance(tkey, list) and len(tkey) > 1:
keys = []
for key in tkey:
if isinstance(key, str):
keys.extend(self.trans[key])
elif isinstance(key, int):
keys.extend(self.trans[self.trans_keys[key]])
elif tkey in self.trans_keys:
keys = self.trans[tkey]
if filtered_data:
return self.df_flt[keys]
else:
return self.df[keys]
def rview(self, ind_var, filtered_data=False):
"""
        Convenience function to return regression independent variable.
Parameters
----------
ind_var: string or list of strings
may be 'power', 'poa', 't_amb', 'w_vel', a list of some subset of
the previous four strings or 'all'
"""
if ind_var == 'all':
keys = list(self.reg_trans.values())
elif isinstance(ind_var, list) and len(ind_var) > 1:
keys = [self.reg_trans[key] for key in ind_var]
elif ind_var in met_keys:
ind_var = [ind_var]
keys = [self.reg_trans[key] for key in ind_var]
lst = []
for key in keys:
if key in self.df.columns:
lst.extend([key])
else:
lst.extend(self.trans[key])
if filtered_data:
return self.df_flt[lst]
else:
return self.df[lst]
def __comb_trans_keys(self, grp):
comb_keys = []
for key in self.trans_keys:
if key.find(grp) != -1:
comb_keys.append(key)
cols = []
for key in comb_keys:
cols.extend(self.trans[key])
grp_comb = grp + '_comb'
if grp_comb not in self.trans_keys:
self.trans[grp_comb] = cols
self.trans_keys.extend([grp_comb])
print('Added new group: ' + grp_comb)
def review_trans(self):
"""
Print translation dictionary with nice formatting.
"""
if len(self.trans) == 0:
return 'Translation dictionary is empty.'
else:
for trans_grp, col_list in self.trans.items():
print(trans_grp)
for col in col_list:
print(' ' + col)
# PLOTTING METHODS
def reg_scatter_matrix(self):
"""
Create pandas scatter matrix of regression variables.
"""
df = self.get_reg_cols(reg_vars=['poa', 't_amb', 'w_vel'])
df['poa_poa'] = df['poa'] * df['poa']
df['poa_t_amb'] = df['poa'] * df['t_amb']
df['poa_w_vel'] = df['poa'] * df['w_vel']
df.drop(['t_amb', 'w_vel'], axis=1, inplace=True)
return(pd.plotting.scatter_matrix(df))
def scatter(self, filtered=True):
"""
Create scatter plot of irradiance vs power.
Parameters
----------
filtered : bool, default true
Plots filtered data when true and all data when false.
"""
if filtered:
df = self.rview(['power', 'poa'], filtered_data=True)
else:
df = self.rview(['power', 'poa'], filtered_data=False)
if df.shape[1] != 2:
return warnings.warn('Aggregate sensors before using this '
'method.')
df = df.rename(columns={df.columns[0]: 'power', df.columns[1]: 'poa'})
plt = df.plot(kind='scatter', x='poa', y='power',
title=self.name, alpha=0.2)
return(plt)
def scatter_hv(self, timeseries=False):
"""
Create holoview scatter plot of irradiance vs power. Optional linked
time series plot of the same data.
Try running twice if the points selected with the lasso tool are not
highlighted in the timeseries i.e. linked brushing is not working.
Use holoviews opts magics in notebook cell before calling method to
adjust height and width of plots:
%%opts Scatter [height=200, width=400]
%%opts Curve [height=200, width=400]
Parameters
----------
timeseries : boolean, default False
True adds timeseries plot of power data with linked brushing.
"""
new_names = ['power', 'poa', 't_amb', 'w_vel']
df = self.get_reg_cols(reg_vars=new_names, filtered_data=True)
df['index'] = self.df_flt.loc[:, 'index']
df.index.name = 'date_index'
df['date'] = df.index.values
opt_dict = {'Scatter': {'style': dict(size=5),
'plot': dict(tools=['box_select',
'lasso_select',
'hover'],
legend_position='right',
height=400, width=400
)},
'Curve': {'plot': dict(tools=['box_select', 'lasso_select',
'hover'],
height=400,
width=800)},
'Layout': {'plot': dict(shared_datasource=True)}}
poa_vs_kw = hv.Scatter(df, 'poa', ['power', 'w_vel', 'index'])
poa_vs_time = hv.Curve(df, 'date', ['power', 'poa'])
layout_scatter = (poa_vs_kw).opts(opt_dict)
layout_timeseries = (poa_vs_kw + poa_vs_time).opts(opt_dict)
if timeseries:
DataLink(poa_vs_kw, poa_vs_time)
return(layout_timeseries.cols(1))
else:
return(layout_scatter)
def plot(self, marker='line', ncols=2, width=400, height=350,
legends=False, merge_grps=['irr', 'temp'], subset=None,
filtered=False, **kwargs):
"""
Plots a Bokeh line graph for each group of sensors in self.trans.
Function returns a Bokeh grid of figures. A figure is generated for each
key in the translation dictionary and a line is plotted for each raw
column name paired with that key.
For example, if there are multiple plane of array irradiance sensors,
the data from each one will be plotted on a single figure.
Figures are not generated for categories that would plot more than 10
lines.
Parameters
----------
marker : str, default 'line'
Accepts 'line', 'circle', 'line-circle'. These are bokeh marker
options.
ncols : int, default 2
Number of columns in the bokeh gridplot.
width : int, default 400
Width of individual plots in gridplot.
height: int, default 350
Height of individual plots in gridplot.
legends : bool, default False
Turn on or off legends for individual plots.
merge_grps : list, default ['irr', 'temp']
List of strings to search for in the translation dictionary keys.
A new key and group is created in the translation dictionary for
each group. By default will combine all irradiance measurements
into a group and temperature measurements into a group.
Pass empty list to not merge any plots.
Use 'irr-poa' and 'irr-ghi' to plot clear sky modeled with measured
data.
subset : list, default None
List of the translation dictionary keys to use to control order of
plots or to plot only a subset of the plots.
filtered : bool, default False
Set to true to plot the filtered data.
kwargs
            Pass additional options to bokeh gridplot. merge_tools=False will
            show the hover tool icon, so it can be turned off.
Returns
-------
show(grid)
Command to show grid of figures. Intended for use in jupyter
notebook.
"""
for str_val in merge_grps:
self.__comb_trans_keys(str_val)
if filtered:
dframe = self.df_flt
else:
dframe = self.df
dframe.index.name = 'Timestamp'
names_to_abrev = {val: key for key, val in self.trans_abrev.items()}
plots = []
x_axis = None
source = ColumnDataSource(dframe)
hover = HoverTool()
hover.tooltips = [
("Name", "$name"),
("Datetime", "@Timestamp{%D %H:%M}"),
("Value", "$y"),
]
hover.formatters = {"Timestamp": "datetime"}
if isinstance(subset, list):
plot_keys = subset
else:
plot_keys = self.trans_keys
for j, key in enumerate(plot_keys):
df = dframe[self.trans[key]]
cols = df.columns.tolist()
if x_axis is None:
p = figure(title=key, plot_width=width, plot_height=height,
x_axis_type='datetime', tools='pan, xwheel_pan, xwheel_zoom, box_zoom, save, reset')
p.tools.append(hover)
x_axis = p.x_range
if j > 0:
p = figure(title=key, plot_width=width, plot_height=height,
x_axis_type='datetime', x_range=x_axis, tools='pan, xwheel_pan, xwheel_zoom, box_zoom, save, reset')
p.tools.append(hover)
legend_items = []
for i, col in enumerate(cols):
abbrev_col_name = key + str(i)
if col.find('csky') == -1:
line_dash = 'solid'
else:
line_dash = (5, 2)
if marker == 'line':
series = p.line('Timestamp', col, source=source,
line_color=self.col_colors[col],
line_dash=line_dash,
name=names_to_abrev[col])
elif marker == 'circle':
series = p.circle('Timestamp', col,
source=source,
line_color=self.col_colors[col],
size=2, fill_color="white",
name=names_to_abrev[col])
if marker == 'line-circle':
series = p.line('Timestamp', col, source=source,
line_color=self.col_colors[col],
name=names_to_abrev[col])
series = p.circle('Timestamp', col,
source=source,
line_color=self.col_colors[col],
size=2, fill_color="white",
name=names_to_abrev[col])
legend_items.append((col, [series, ]))
legend = Legend(items=legend_items, location=(40, -5))
legend.label_text_font_size = '8pt'
if legends:
p.add_layout(legend, 'below')
plots.append(p)
grid = gridplot(plots, ncols=ncols, **kwargs)
return show(grid)
def reset_flt(self):
"""
Copies over filtered dataframe with raw data and removes all summary
history.
"""
self.df_flt = self.df.copy()
self.summary_ix = []
self.summary = []
def reset_agg(self):
"""
Remove aggregation columns from df and df_flt attributes.
        Does not reset filtering of df_flt.
"""
if self.pre_agg_cols is None:
            return warnings.warn('Nothing to reset; agg_sensors has not been '
                                 'used.')
else:
self.df = self.df[self.pre_agg_cols].copy()
self.df_flt = self.df_flt[self.pre_agg_cols].copy()
self.trans = self.pre_agg_trans.copy()
self.reg_trans = self.pre_agg_reg_trans.copy()
def __get_poa_col(self):
"""
Returns poa column name from translation dictionary.
Also, issues warning if there are more than one poa columns in the
translation dictionary.
"""
poa_trans_key = self.reg_trans['poa']
if poa_trans_key in self.df.columns:
return poa_trans_key
else:
poa_cols = self.trans[poa_trans_key]
if len(poa_cols) > 1:
return warnings.warn('{} columns of irradiance data. '
'Use col_name to specify a single '
'column.'.format(len(poa_cols)))
else:
return poa_cols[0]
def agg_sensors(self, agg_map=None, keep=True, update_reg_trans=True,
inplace=True, inv_sum_vs_power=False):
"""
        Aggregate measurements of the same variable from different sensors.
Parameters
----------
agg_map : dict, default None
Dictionary specifying types of aggregations to be performed for
the column groups defined by the trans attribute. The dictionary
keys should be keys of the trans dictionary attribute. The
dictionary values should be aggregation functions or lists of
aggregation functions.
            By default an agg_map dictionary is created within the method to
            aggregate the regression parameters as follows:
- sum power
- mean of poa, t_amb, w_vel
keep : bool, default True
Appends aggregation results columns rather than returning
or overwriting df_flt and df attributes with just the aggregation
results.
update_reg_trans : bool, default True
By default updates the reg_trans dictionary attribute to map the
regression variable to the aggregation column. The reg_trans
attribute is not updated if inplace is False.
inplace : bool, default True
True writes over dataframe in df and df_flt attribute.
False returns an aggregated dataframe.
inv_sum_vs_power : bool, default False
            When true method attempts to identify a summation of inverters and
            move it to the same translation dictionary grouping as the meter
            data to facilitate comparison. If False the inverter sum
            aggregation column is left in the inverter translation dictionary
            group.
Note: When set to true this option will cause issues with methods
that expect a single column of data identified by reg_trans power.
Returns
-------
DataFrame
If inplace is False, then returns a pandas DataFrame.
Todo
----
Re-apply filters
Explore re-applying filters after aggregation, if filters have
been run before using agg_sensors.
"""
if not len(self.summary) == 0:
warnings.warn('The df_flt attribute has been overwritten and '
'previously applied filtering steps have been '
'lost. It is recommended to use agg_sensors '
'before any filtering methods. In the future the '
'agg_sensors method could possibly re-apply '
'filters, if there is interest in this '
'functionality.')
# reset summary data
self.summary_ix = []
self.summary = []
self.pre_agg_cols = self.df.columns
self.pre_agg_trans = self.trans.copy()
self.pre_agg_reg_trans = self.reg_trans.copy()
if agg_map is None:
agg_map = {self.reg_trans['power']: 'sum',
self.reg_trans['poa']: 'mean',
self.reg_trans['t_amb']: 'mean',
self.reg_trans['w_vel']: 'mean'}
dfs_to_concat = []
for trans_key, agg_funcs in agg_map.items():
df = self.view(trans_key, filtered_data=False)
df = df.agg(agg_funcs, axis=1)
if not isinstance(agg_funcs, list):
df = pd.DataFrame(df)
if isinstance(agg_funcs, str):
df = pd.DataFrame(df)
col_name = trans_key + agg_funcs + '-agg'
df.rename(columns={df.columns[0]: col_name}, inplace=True)
else:
col_name = trans_key + agg_funcs.__name__ + '-agg'
df.rename(columns={df.columns[0]: col_name}, inplace=True)
else:
df.rename(columns=(lambda x: trans_key + x + '-agg'),
inplace=True)
dfs_to_concat.append(df)
if keep:
dfs_to_concat.append(self.df)
if inplace:
if update_reg_trans:
for reg_var, trans_group in self.reg_trans.items():
if trans_group in agg_map.keys():
if isinstance(agg_map[trans_group], list):
if len(agg_map[trans_group]) > 1:
                                warn_str = ('Multiple aggregation functions '
                                            'specified for regression '
                                            'variable. Reset reg_trans '
                                            'manually.')
warnings.warn(warn_str)
break
try:
agg_col = trans_group + agg_map[trans_group] + '-agg'
except TypeError:
agg_col = trans_group + col_name + '-agg'
self.reg_trans[reg_var] = agg_col
self.df = pd.concat(dfs_to_concat, axis=1)
self.df_flt = self.df.copy()
self.set_translation(trans_report=False)
inv_sum_in_cols = [True for col
in self.df.columns if '-inv-sum-agg' in col]
if inv_sum_in_cols and inv_sum_vs_power:
for key in self.trans_keys:
if 'inv' in key:
inv_key = key
for col_name in self.trans[inv_key]:
if '-inv-sum-agg' in col_name:
inv_sum_col = col_name
mtr_cols = [col for col
in self.trans_keys
if 'mtr' in col or 'real_pwr' in col]
if len(mtr_cols) > 1:
                    warnings.warn('Multiple meter cols; unclear what trans '
                                  'group to place inv sum in.')
else:
inv_cols = self.trans[inv_key]
inv_cols.remove(inv_sum_col)
self.trans[inv_key] = inv_cols
self.trans[mtr_cols[0]].append(inv_sum_col)
else:
return pd.concat(dfs_to_concat, axis=1)
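    # Sketch of a custom agg_map; the translation keys shown are illustrative
    # and must match keys in the trans attribute of the CapData object:
    #     das.agg_sensors(agg_map={'-inv-': 'sum',
    #                              'irr-poa-ref_cell': 'mean',
    #                              'temp-amb-': 'mean',
    #                              'wind--': 'mean'})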
@update_summary
def filter_irr(self, low, high, ref_val=None, col_name=None, inplace=True):
"""
Filter on irradiance values.
Parameters
----------
low : float or int
Minimum value as fraction (0.8) or absolute 200 (W/m^2)
high : float or int
Max value as fraction (1.2) or absolute 800 (W/m^2)
ref_val : float or int
Must provide arg when min/max are fractions
col_name : str, default None
Column name of irradiance data to filter. By default uses the POA
irradiance set in reg_trans attribute or average of the POA columns.
inplace : bool, default True
            Default of True writes back to df_flt; False returns the filtered
            dataframe.
Returns
-------
DataFrame
Filtered dataframe if inplace is False.
"""
if col_name is None:
irr_col = self.__get_poa_col()
else:
irr_col = col_name
df_flt = flt_irr(self.df_flt, irr_col, low, high,
ref_val=ref_val)
if inplace:
self.df_flt = df_flt
else:
return df_flt
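    # For example, filter to 400-800 W/m^2, then to +/- 20% of a reference
    # value (the 600 W/m^2 reference is illustrative):
    #     das.filter_irr(400, 800)
    #     das.filter_irr(0.8, 1.2, ref_val=600)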
@update_summary
def filter_pvsyst(self, inplace=True):
"""
Filter pvsyst data for off max power point tracking operation.
This function is only applicable to simulated data generated by PVsyst.
Filters the 'IL Pmin', IL Vmin', 'IL Pmax', 'IL Vmax' values if they
are greater than 0.
Parameters
----------
inplace: bool, default True
            If inplace is true, the method overwrites the filtered
            dataframe. If false, returns the filtered data.

        Returns
        -------
        pd.DataFrame
            Filtered dataframe if inplace is set to False.
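        Examples
        --------
        Illustrative usage; assumes ``cd`` holds PVsyst simulation results
        containing the 'IL Pmin', 'IL Vmin', 'IL Pmax', and 'IL Vmax'
        columns.

        >>> cd.filter_pvsyst()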
"""
df = self.df_flt
columns = ['IL Pmin', 'IL Vmin', 'IL Pmax', 'IL Vmax']
index = df.index
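        # Drop timestamps where any PVsyst current-limiting flag is nonzero,
        # i.e. periods of operation off the maximum power point.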
for column in columns:
if column in df.columns:
indices_to_drop = df[df[column] > 0].index
if not index.equals(indices_to_drop):
index = index.difference(indices_to_drop)
else:
                warnings.warn('{} is not a column in the '
                              'data.'.format(column))
if inplace:
self.df_flt = self.df_flt.loc[index, :]
else:
return self.df_flt.loc[index, :]
@update_summary
def filter_shade(self, fshdbm=1.0, query_str=None, inplace=True):
"""
Remove data during periods of array shading.
The default behavior assumes the filter is applied to data output from
PVsyst and removes all periods where values in the column 'FShdBm' are
less than 1.0.
Use the query_str parameter when shading losses (power) rather than a
shading fraction are available.
Parameters
----------
fshdbm : float, default 1.0
The value for fractional shading of beam irradiance as given by the
PVsyst output parameter FShdBm. Data is removed when the shading
fraction is less than the value passed to fshdbm. By default all
periods of shading are removed.
query_str : str
Query string to pass to pd.DataFrame.query method. The query string
should be a boolean expression comparing a column name to a numeric
filter value, like 'ShdLoss<=50'. The column name must not contain
spaces.
inplace: bool, default True
If inplace is true, then function overwrites the filtered
dataframe. If false returns a DataFrame.
Returns
-------
pd.DataFrame
If inplace is false returns a dataframe.
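        Examples
        --------
        Illustrative usage; assumes ``cd`` holds PVsyst simulation results.

        Remove all periods with any beam shading (default behavior):

        >>> cd.filter_shade()

        Filter on a shading loss column instead; 'ShdLoss' and the threshold
        are placeholders for your own data:

        >>> cd.filter_shade(query_str='ShdLoss<=50')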
"""
df = self.df_flt
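        # Default assumes PVsyst output: keep rows where the beam shading
        # fraction (FShdBm) is at or above the passed threshold.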
if query_str is None:
query_str = "FShdBm>=@fshdbm"
index_shd = df.query(query_str).index
if inplace:
self.df_flt = self.df_flt.loc[index_shd, :]
else:
return self.df_flt.loc[index_shd, :]
@update_summary
def filter_time(self, start=None, end=None, days=None, test_date=None,
inplace=True, wrap_year=False):
"""
Function wrapping pandas dataframe selection methods.
Parameters
----------
start: str
Start date for data to be returned. Must be in format that can be
converted by pandas.to_datetime. Not required if test_date and days
arguments are passed.
end: str
End date for data to be returned. Must be in format that can be
converted by pandas.to_datetime. Not required if test_date and days
arguments are passed.
days: int
Days in time period to be returned. Not required if start and end
are specified.
test_date: str
Must be format that can be converted by pandas.to_datetime. Not
required if start and end are specified. Requires days argument.
Time period returned will be centered on this date.
        inplace : bool, default True
            If true, overwrite df_flt with the selected time period; if
            false, return the selected data.
        wrap_year : bool, default False
            If true and the selected period spans the end of the year, the
            data is wrapped around the end of the year so the period is
            treated as contiguous.
Todo
----
        Add an inverse option to remove the time between start and end rather
        than returning it.
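        Examples
        --------
        Illustrative usage; assumes ``cd`` is a CapData instance with
        time-indexed data loaded. The dates shown are placeholders.

        >>> cd.filter_time(start='10/1/1990', end='10/15/1990')
        >>> cd.filter_time(start='10/1/1990', days=14)
        >>> cd.filter_time(test_date='10/8/1990', days=14)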
"""
if start is not None and end is not None:
start = pd.to_datetime(start)
end = pd.to_datetime(end)
if wrap_year and spans_year(start, end):
df_temp = cntg_eoy(self.df_flt, start, end)
else:
df_temp = self.df_flt.loc[start:end, :]
if start is not None and end is None:
if days is None:
return warnings.warn("Must specify end date or days.")
else:
start = pd.to_datetime(start)
end = start + pd.DateOffset(days=days)
if wrap_year and spans_year(start, end):
df_temp = cntg_eoy(self.df_flt, start, end)
else:
df_temp = self.df_flt.loc[start:end, :]
if start is None and end is not None:
if days is None:
                return warnings.warn("Must specify start date or days.")
else:
end = pd.to_datetime(end)
start = end - pd.DateOffset(days=days)
if wrap_year and spans_year(start, end):
df_temp = cntg_eoy(self.df_flt, start, end)
else:
df_temp = self.df_flt.loc[start:end, :]
if test_date is not None:
            test_date = pd.to_datetime(test_date)
            if days is None:
                return warnings.warn("Must specify days with test_date.")
            else:
                # Center the returned period on test_date, as described in
                # the docstring above.
                offset = pd.DateOffset(days=days // 2)
                start = test_date - offset
                end = test_date + offset
                if wrap_year and spans_year(start, end):
                    df_temp = cntg_eoy(self.df_flt, start, end)
                else:
                    df_temp = self.df_flt.loc[start:end, :]
        if inplace:
            self.df_flt = df_temp
        else:
            return df_temp