| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import argparse
import sys
import collections
import numpy as np
import pandas as pd
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from ast import literal_eval
parser = argparse.ArgumentParser(description='Compare predictions from HMM model')
parser.add_argument('-i', '--input', help='input_files', nargs='+', required=True)
parser.add_argument('-n', '--names', help='optional names for the input files', nargs='+', default=None)
parser.add_argument('-o', '--output-folder', help='where to save the plots', default='.')
args = parser.parse_args()
if args.names and len(args.input) != len(args.names):
sys.exit("Number of names and files do not match!")
class PredictionContainer(object):
__slots__ = ['name', 'seqs', 'charges', 'scores']
def __init__(self, name, tuples):
self.name = name
self.seqs = set()
seqs, zs, scores = zip(*tuples)
self.seqs.update(seqs)
self.charges = [int(z) for z in zs]
self.scores = [float(s) for s in scores]
def _getAACounts(seq):
c = collections.Counter(seq)
return {aa: c[aa] for aa in seq}
# TODO: Annotate the plots
def plotLengthDistribution(data_series, file):
dfdata = {name: [len(s) for s in pc.seqs] for name, pc in data_series.items()}
df = pd.DataFrame.from_dict(dfdata, orient='index')
df = df.transpose()
min_l = df.min(numeric_only=True).min()
max_l = df.max(numeric_only=True).max()
df.plot.hist(bins=np.arange(min_l, max_l + 1, 1), alpha=0.5)
plt.savefig(file, bbox_inches='tight', format='pdf')
plt.close()
def plotChargeDistribution(data_series, file):
dfdata = {name: pc.charges for name, pc in data_series.items()}
df = pd.DataFrame.from_dict(dfdata, orient='index')
df = df.transpose()
fig, axes = plt.subplots(nrows=2, ncols=1)
fig.suptitle("Distribution of charge states")
df.plot.kde(ax=axes[0])
axes[1].hist(df.values, histtype='bar')
plt.savefig(file, bbox_inches='tight', format='pdf')
plt.close()
def plotScoreDistribution(data_series, file):
dfdata = {name: pc.scores for name, pc in data_series.items()}
df = pd.DataFrame.from_dict(dfdata, orient='index')
df = df.transpose()
fig, axes = plt.subplots(nrows=2, ncols=1)
df.plot.hist(ax=axes[0], bins=100)
#df.plot.kde(ax=axes[0], xlim=(-50, 400))
df.plot.box(ax=axes[1], vert=False)
fig.suptitle("Distribution of Andromeda scores")
plt.savefig(file, bbox_inches='tight', format='pdf')
plt.close()
def plotChargeLengthBivariate(data_series, file):
for name, ds in data_series.items():
lens = [len(s) for s in ds.seqs]
zs = ds.charges
dfdata = list(zip(lens, zs))
df = pd.DataFrame(dfdata, columns=["length", "z"])
ax = sns.jointplot(x="length", y="z", data=df)
plt.title(f'Hexbin density plot for {name}')
plt.savefig(file, bbox_inches='tight', format='pdf')
plt.close()
def plotAADistribution(data_series, file):
def getDF(series):
d = {i: _getAACounts(s) for i, s in enumerate(series.seqs)}
df = pd.DataFrame.from_dict(d, orient='index')  # api: pandas.DataFrame.from_dict
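# Illustrative sketch, not part of the snippet above: pandas.DataFrame.from_dict with
# orient='index' turns each outer key into a row label and each inner dict into that row's
# columns, leaving NaN where a column is missing. The toy counts below are made up.
import pandas as pd

example_counts = {0: {'A': 2, 'C': 1}, 1: {'A': 1, 'G': 3}}
example_df = pd.DataFrame.from_dict(example_counts, orient='index')
# example_df has index [0, 1] and columns ['A', 'C', 'G']; row 0 is NaN under 'G'.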
# -*- coding:UTF-8 -*-
import os.path
import pandas as pd
import cv2
import requests
def check_rotate(dt_boxes_df):
check_df = dt_boxes_df.copy()
# If enough boxes are taller than they are wide (more than a fifth of them, or at least 3), assume the image needs to be rotated
check_df = check_df[check_df['length'] + check_df['hight'] > 50]
if len(check_df[check_df['hight'] > check_df['length']]) > len(check_df) / 5 or len(check_df[check_df['hight'] > check_df['length']]) >= 3:
return True
return False
# Convert detection boxes to a DataFrame
def box_to_df(dt_boxes):
dt_boxes_df = pd.DataFrame()
dt_boxes_df['x1'] = pd.Series(dt_boxes[:, 0, 0])
dt_boxes_df['x2'] = pd.Series(dt_boxes[:, 1, 0])
dt_boxes_df['x3'] = pd.Series(dt_boxes[:, 2, 0])
dt_boxes_df['x4'] = pd.Series(dt_boxes[:, 3, 0])
dt_boxes_df['y1'] = pd.Series(dt_boxes[:, 0, 1])
dt_boxes_df['y2'] = pd.Series(dt_boxes[:, 1, 1])
dt_boxes_df['y3'] = pd.Series(dt_boxes[:, 2, 1])
dt_boxes_df['y4'] = pd.Series(dt_boxes[:, 3, 1])
dt_boxes_df['length'] = dt_boxes_df['x3'] - dt_boxes_df['x1']
dt_boxes_df['hight'] = dt_boxes_df['y3'] - dt_boxes_df['y1']
return dt_boxes_df
# Draw the text-detection boxes on the original image
def img_add_box(img, dt_boxes):
for box in dt_boxes:
box = box.astype(int)
cv2.rectangle(img, tuple(box[0]), tuple(box[2]), (0, 255, 0), 2)
return img
# Post-process OCR text results: join all recognized lines into a single string
def ocr_2_text(ocr_result):
text = ''
if len(ocr_result):
text = '#line#'.join([line[1][0] for line in ocr_result])
return text
# Convert OCR results to a DataFrame and compute some common fields
def ocr_2_df(ocr_result):
ocr_df = pd.DataFrame()
if len(ocr_result):
ocr_df['x1'] = pd.Series([item[0][0][0] for item in ocr_result])
ocr_df['x2'] = pd.Series([item[0][1][0] for item in ocr_result])
ocr_df['y1'] = pd.Series([item[0][0][1] for item in ocr_result])
ocr_df['y2'] = pd.Series([item[0][1][1] for item in ocr_result])
ocr_df['text'] = pd.Series([item[1][0] for item in ocr_result])
ocr_df['score'] = pd.Series([item[1][1] for item in ocr_result])  # api: pandas.Series
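# Illustrative sketch, not part of the OCR snippet above: each column there is built from a
# list comprehension wrapped in pandas.Series. The scores below are invented values.
import pandas as pd

fake_scores = [0.98, 0.87, 0.91]
score_col = pd.Series(fake_scores)              # RangeIndex 0..2, dtype float64
df_scores = pd.DataFrame({'score': score_col})  # same pattern as ocr_df['score'] above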
import pandas as pd
import matplotlib.pyplot as plt
import time
import os
def symbol_to_path(symbol, basedir='data'):
return os.path.join(basedir, '{}.csv'.format(symbol))
def get_data(symbols, dates):
df = pd.DataFrame(index=dates)  # api: pandas.DataFrame
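# Illustrative sketch (assumed continuation, not the author's code): loaders of this shape
# usually start from an empty DataFrame indexed by the requested dates and then join each
# symbol's CSV into it. The symbol 'SPY' and the column names below are assumptions.
import pandas as pd

dates_example = pd.date_range('2020-01-01', '2020-01-05')
prices = pd.DataFrame(index=dates_example)   # empty frame keyed by the trading dates
# A per-symbol frame could then be joined in, for example:
# sym_df = pd.read_csv(symbol_to_path('SPY'), index_col='Date', parse_dates=True,
#                      usecols=['Date', 'Adj Close'], na_values=['nan'])
# prices = prices.join(sym_df)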
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import os
import wandb
from utils import set_seed, parse_training_args
from dataset import ToxicDataset
from trainer import Trainer
from model import convert_regressor_to_binary, convert_binary_to_regressor
if __name__ == "__main__":
args = parse_training_args()
config = vars(args)
if config["use_extra_data"]:
extra_files = [
os.path.join(config["extra_data_dir"], f)
for f in os.listdir(config["extra_data_dir"])
]
config["extra_files"] = extra_files
wandb.login()
if config["num_labels"] is None or config["num_labels"] == 1:
project = "jigsaw-train"
else:
project = "jigsaw-binary-train"
with wandb.init(
project=project,
group=str(args.group_id),
name=f"{args.group_id}-{args.checkpoint}",
config=config,
):
config = wandb.config
set_seed(config.seed)
train_data = pd.read_csv(config.train_path)  # api: pandas.read_csv
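# Illustrative sketch, not part of the training script above: pandas.read_csv parses CSV
# text into a DataFrame. An in-memory buffer stands in for config.train_path here.
import io
import pandas as pd

csv_text = "text,label\nhello,0\nworld,1\n"
train_df_example = pd.read_csv(io.StringIO(csv_text))   # columns ['text', 'label'], 2 rows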
# -*- coding: utf-8 -*-
"""
@author: mesar
"""
import pandas as pd
import json
from datetime import datetime
import numpy as np
import csv
from pathlib import Path
from progressbar import progressbar as pbar
import time
import sys
def parallel_parsing(i, key, number_of_clients, vehicle_capacity, package_data_list, route_data_list, travel_times_list):
route_info = {}
travel_time_matrix = []
row_values = ['key']
for k in travel_times_list.keys():
row_values.append(k)
travel_time_matrix.append(row_values)
for k in travel_times_list.keys():
row_values = []
row_values.append(k)
for j in travel_times_list[k].keys():
row_values.append(travel_times_list[k][j])
travel_time_matrix.append(row_values)
"""
for k in actual_sequences_list[key][0].keys():
for j in actual_sequences_list[key][0][k].keys():
route_info[j] = {}
route_info[j]['order'] = actual_sequences_list[key][0][k][j]
"""
latlongs = []
zone_ids = []
for k in route_data_list['stops'].keys():
latlongs.append((route_data_list['stops'][k]['lat'], route_data_list['stops'][k]['lng']))
depart_time_1 = str(route_data_list['date_YYYY_MM_DD']) + " " + str(route_data_list['departure_time_utc'])
departure_time = datetime.strptime(depart_time_1, '%Y-%m-%d %H:%M:%S')
departure_time_seconds = (departure_time.hour * 3600) + (departure_time.minute * 60) + departure_time.second
route_info[k] = {}
route_info[k]['latitude'] = route_data_list['stops'][k]['lat']
route_info[k]['longitude'] = route_data_list['stops'][k]['lng']
route_info[k]['zone_id'] = route_data_list['stops'][k]['zone_id']
id_zona = -1 if route_data_list['stops'][k]['zone_id'] == "NaN" else route_data_list['stops'][k]['zone_id']
zone_ids.append(id_zona)
route_info[k]['type'] = route_data_list['stops'][k]['type']
#route_info[k]['score'] = route_data_list['route_score']
route_info[k]['departure_time'] = route_data_list['departure_time_utc']
route_info[k]['departure_date'] = route_data_list['date_YYYY_MM_DD']
route_info[k]['route_id'] = key
route_info[k]['max_capacity'] = vehicle_capacity
time_windows = []
counter = 0
planned_service_times = []
dimensions = []
package_id_and_client = {}
double_visits = {}
for k in package_data_list.keys():
time_window1 = -1
time_window2 = -1
sum_dimensions = 0
number_packages = 0
max_depth = 0.0
max_width = 0.0
max_height = 0.0
planned_service_time_client = 0.0
#package_status = ""
for j in package_data_list[k].keys():
if j in package_id_and_client:
double_visits[k] = [package_id_and_client[j]]
double_visits[package_id_and_client[j]] = [k]
else:
package_id_and_client[j] = k
#new_package_status = 'D' if str(package_data_list[k][j]['scan_status']) == 'DELIVERED' else 'A'
#package_status = package_status +'_' + new_package_status
date_value1 = str(package_data_list[k][j]['time_window']['start_time_utc'])
date_value2 = str(package_data_list[k][j]['time_window']['end_time_utc'])
planned_service_time_client += package_data_list[k][j]['planned_service_time_seconds']
if(date_value1 != 'nan' and date_value2 != 'nan'):
real_date_1 = datetime.strptime(date_value1, '%Y-%m-%d %H:%M:%S')
real_date_2 = datetime.strptime(date_value2, '%Y-%m-%d %H:%M:%S')
date_1 = datetime.strptime(date_value1, '%Y-%m-%d %H:%M:%S') - departure_time
date_2 = datetime.strptime(date_value2, '%Y-%m-%d %H:%M:%S') - departure_time
if (real_date_1 <= departure_time):
time_window1 = 0
time_window2 = date_2.seconds
else:
time_window1 = date_1.seconds
time_window2 = date_2.seconds
#time_window1 = (date_1.hour * 3600) + (date_1.minute * 60) + date_1.second
#time_window2 = (date_2.hour * 3600) + (date_2.minute * 60) + date_2.second
#if(date_1.day != date_2.day):
#time_window2 += (24*3600)
else:
time_window1 = -1
time_window2 = -1
real_date_1 = -1
real_date_2 = -1
try:
#if(time_windows[counter][0] == -1 and time_windows[counter][1] == -1):
#time_windows[counter] = (time_window1, time_window2)
route_info[k]['time_window_start_seconds'] = time_window1
route_info[k]['time_window_end_seconds'] = time_window2
route_info[k]['time_window_start'] = real_date_1
route_info[k]['time_window_end'] = real_date_2
except(BaseException):
#time_windows.append((time_window1, time_window2))
route_info[k]['time_window_start_seconds'] = time_window1
route_info[k]['time_window_end_seconds'] = time_window2
route_info[k]['time_window_start'] = real_date_1
route_info[k]['time_window_end'] = real_date_2
depth = float(-1 if package_data_list[k][j]['dimensions']['depth_cm'] == 'NaN' else package_data_list[k][j]['dimensions']['depth_cm'])
height = float(-1 if package_data_list[k][j]['dimensions']['height_cm'] == 'NaN' else package_data_list[k][j]['dimensions']['height_cm'])
width = float(-1 if package_data_list[k][j]['dimensions']['width_cm'] == 'NaN' else package_data_list[k][j]['dimensions']['width_cm'])
max_depth = depth if ((depth >= max_depth) and (depth != -1)) else max_depth
max_height = height if ((height >= max_height) and (height != -1)) else max_height
max_width = width if ((width >= max_width) and (width != -1)) else max_width
sum_dimensions += (depth * height * width)
number_packages += 1
planned_service_times.append(planned_service_time_client)
dimensions.append(sum_dimensions)
route_info[k]['service_time'] = planned_service_time_client
route_info[k]['dimensions'] = sum_dimensions
route_info[k]['number_packages'] = number_packages
#route_info[k]['package_status'] = package_status
route_info[k]['max_depth'] = max_depth
route_info[k]['max_height'] = max_height
route_info[k]['max_width'] = max_width
#route_info[k]['double_visit'] = double_visits[k] if k in double_visits else -1
time_windows.append((time_window1, time_window2))
counter += 1
order_counter = 1
for k in route_info.keys():
route_info[k]['order'] = order_counter
if route_info[k]['type'] == "Station":
depot_key = k
route_info[k]['order'] = 0
else:
order_counter = order_counter + 1
for z in range(len(travel_time_matrix)):
if travel_time_matrix[z][0] == depot_key:
depot_number = z
#f = open("../../parsed_files/"+key+".txt", "w")
f = open("../data/model_apply_outputs/parsed_files_val/route_"+str(i)+".txt", "w")
f.write("number_of_clients\n")
f.write(str(number_of_clients) + "\n")
f.write("vehicle_capacity\n")
f.write(str(vehicle_capacity) + "\n")
f.write("depot_number\n")
f.write(str(depot_number) + "\n")
f.write("route_id\n")
f.write(str(key) + "\n")
f.write("travel_times\n")
for k in travel_time_matrix:
for j in k:
f.write(str(j) + " ")
f.write("\n")
f.write("time_windows\n")
for k in time_windows:
f.write(str(k[0]) + " " + str(k[1]) + "\n")
f.write("service_time\n")
for k in planned_service_times:
f.write(str(k) + "\n")
f.write("dimensions\n")
for k in dimensions:
f.write(str(k) + "\n")
f.write("latitude_longitude\n")
for k in latlongs:
f.write(str(k[0]) + " " + str(k[1]) + "\n")
f.write("zone_id\n")
for k in zone_ids:
f.write(str(k) + "\n")
f.close()
with open("../data/model_apply_outputs/travel_times_files_val/travel_times_route_"+str(i)+".csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(travel_time_matrix)
df = pd.DataFrame.from_dict(route_info, orient='index')  # api: pandas.DataFrame.from_dict
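# Illustrative sketch, not part of the parsing routine above: each per-stop dict in
# route_info becomes one row under orient='index'. The stop ids and values are made up.
import pandas as pd

route_info_example = {
    'stop_A': {'latitude': 47.61, 'longitude': -122.33, 'order': 1},
    'stop_B': {'latitude': 47.62, 'longitude': -122.30, 'order': 2},
}
route_df_example = pd.DataFrame.from_dict(route_info_example, orient='index')
# route_df_example.to_csv('route_example.csv')   # hypothetical output path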
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 10:54:32 2019
@author: nmei
"""
import os
from glob import glob
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
import statsmodels.api as sm
from statsmodels.formula.api import ols
from shutil import copyfile
copyfile('../../../utils.py','utils.py')
import utils
from matplotlib.ticker import FormatStrFormatter
experiment = 'metasema'
here = 'encoding_model_15_ROIs'
working_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
figure_dir = '../../../../figures/{}/RP/{}'.format(experiment,here)
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
working_data = glob(os.path.join(working_dir,'*.csv'))
df = pd.concat([pd.read_csv(f) for f in working_data]).groupby(
['sub_name',
'roi_name',
'model_name',
'condition',
]).mean().reset_index()
N = len(pd.unique(df['sub_name']))
alpha = 100
feature_dict = {'vgg19':'image2vec',
'densenet121':'image2vec',
'mobilenetv2_1':'image2vec',
'Fast_Text':'Word2vec',
'Glove':'Word2vec',
'Word2Vec':'Word2vec',}
df['feature_type'] = df['model_name'].map(feature_dict)
hue_order = ['vgg19', 'densenet121', 'mobilenetv2_1',
'Fast_Text', 'Glove', 'Word2Vec',
]
df = pd.concat([df[df['model_name'] == model_name] for model_name in hue_order])
temp = dict(
F = [],
df_nonimator = [],
df_denominator = [],
p = [],
feature_type = [],
condition = [],
roi_name = [],
)
for (feat,condition,roi),df_sub in df.groupby(['feature_type','condition','roi_name']):
anova = ols('mean_variance ~ model_name',data = df_sub).fit()
aov_table = sm.stats.anova_lm(anova,typ=2)
print(aov_table)
temp['F'].append(aov_table['F']['model_name'])
temp['df_nonimator'].append(aov_table['df']['model_name'])
temp['df_denominator'].append(aov_table['df']['Residual'])
temp['p'].append(aov_table['PR(>F)']['model_name'])
temp['feature_type'].append(feat)
temp['condition'].append(condition)
temp['roi_name'].append(roi)
anova_results = pd.DataFrame(temp)
temp = []
for roi,df_sub in anova_results.groupby('condition'):
df_sub = df_sub.sort_values('p')
converter = utils.MCPConverter(pvals = df_sub['p'].values)
d = converter.adjust_many()
df_sub['p_corrected'] = d['bonferroni'].values
temp.append(df_sub)
anova_results = pd.concat(temp)  # api: pandas.concat
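# Illustrative sketch, not part of the ANOVA script above: pandas.concat stacks a list of
# DataFrames row-wise, which is how the per-condition corrected results are recombined.
import pandas as pd

part_a = pd.DataFrame({'p_corrected': [0.01, 0.04]})
part_b = pd.DataFrame({'p_corrected': [0.20]})
combined = pd.concat([part_a, part_b], ignore_index=True)   # 3 rows, fresh RangeIndex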
'''
train_and_eval_sklearn_binary_classifier.py
Usage
-----
$ python train_and_eval_sklearn_binary_classifier.py \
--dataset_path [path] \
--output_path [path] \
[optional args]
Optional arguments
------------------
--dataset_path DATASET_PATH
Path to folder containing:
*.npy files: X_train, y_train, P_train
*.txt files: X_colnames.txt, y_colnames.txt
--output_path OUTPUT_PATH
Path to folder holding output from this evaluator.
Includes:
* clf_<id>_.dump : loadable clf object
* clf_<id>_callback_train.csv : perf metrics
'''
from __future__ import print_function
import numpy as np
import pandas as pd
import datetime
import sys
import os
import argparse
import itertools
import time
import scipy.sparse
import dill
import copy
from collections import OrderedDict
from distutils.dir_util import mkpath
#from sklearn.externals import joblib
import joblib
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin, MetaEstimatorMixin
from sklearn.metrics import roc_curve, auc, f1_score, roc_auc_score, accuracy_score, average_precision_score
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import Normalizer, Binarizer, FunctionTransformer
from sklearn.preprocessing import normalize, binarize, minmax_scale
from sklearn.pipeline import Pipeline
from pc_toolbox.utils_io import (
load_csr_matrix, pprint, config_pprint_logging,
load_list_of_strings_from_txt,
load_list_of_unicode_from_txt,
)
import matplotlib.pyplot as plt
from pc_toolbox.binary_classifiers.calc_roc_auc_via_bootstrap import calc_binary_clf_metric_with_ci_via_bootstrap
from pc_toolbox.binary_classifiers.utils_calibration import (
calc_binary_clf_calibration_per_bin,
plot_binary_clf_calibration_curve_and_histograms)
def read_args_from_stdin_and_run():
''' Main executable function to train and evaluate classifier.
Post Condition
--------------
AUC and other eval info printed to stdout.
Trained classifier saved ???.
'''
if not sys.stdin.isatty():
for line in sys.stdin.readlines():
line = line.strip()
sys.argv.extend(line.split(' '))
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset_path',
default='/tmp/',
type=str,
help="Path to folder containing:" +
" *.npy files: X_train, y_train, P_train"
" *.txt files: X_colnames.txt and y_colnames.txt")
parser.add_argument(
'--output_path',
default='/tmp/',
type=str,
help="Path to folder to hold output from classifier. Includes:" +
" perf_metric*.txt files: auc_train.txt & auc_test.txt" +
" settings.txt: description of all settings to reproduce.")
parser.add_argument(
'--feature_arr_names',
type=str,
default='X',
help='Name of feature files to use for training')
parser.add_argument(
'--features_path',
default='/tmp/',
type=str,
help="Path to folder with extra feature files")
parser.add_argument(
'--target_arr_name',
default='Y',
type=str,
)
parser.add_argument(
'--target_names',
default='all',
type=str,
help='Name of response/intervention to test.' +
' To try specific interventions, write names separated by commas.' +
' To try all interventions, use special name "all"')
parser.add_argument(
'--n_folds',
default=1,
type=int,
help='Number of folds for cross validation during classification.')
parser.add_argument(
'--classifier_name',
default='logistic_regression',
choices=[
'k_nearest_neighbors',
'mlp',
'logistic_regression',
'extra_trees',
'svm_with_linear_kernel',
'svm_with_rbf_kernel'],
help='Name of classifier')
parser.add_argument(
'--class_weight_opts',
choices=['none', 'balanced'],
default='none',
)
parser.add_argument(
'--max_grid_search_steps',
default=None,
type=int,
help='max number of steps for grid search')
parser.add_argument(
'--frac_labels_train',
default=1.0,
type=float,
help='Fraction of the training data to use')
parser.add_argument(
'--c_logspace_arg_str',
default="-6,4,7",
type=str,
help='Comma-sep list of args to np.logspace')
parser.add_argument(
'--seed',
default=8675309,
type=int,
help='Seed for random number generation')
parser.add_argument(
'--seed_bootstrap',
default=42,
type=int,
help='Seed for bootstrap')
parser.add_argument(
'--n_bootstraps',
default=5000,
type=int,
help='Number of samples for bootstrap conf. intervals')
parser.add_argument(
'--bootstrap_stratify_pos_and_neg',
default=True,
type=int,
help='Whether to stratify examples or not')
args, unk_list = parser.parse_known_args()
arg_dict = vars(args)
dataset_path = arg_dict['dataset_path']
for key, val in arg_dict.items():
if arg_dict['output_path'].count('$' + key):
arg_dict['output_path'] = \
arg_dict['output_path'].replace('$' + key, str(val))
if not os.path.exists(arg_dict['output_path']):
mkpath(arg_dict['output_path'])
config_pprint_logging(
arg_dict['output_path'],
txtfile='stdout_%s.txt' % arg_dict['target_names'])
pprint('[run_classifier says:] Parsing args ...')
# Parse possible preprocessors
feat_preproc_grid_dict = dict()
for key, val in zip(unk_list[::2], unk_list[1::2]):
if key.startswith('--preproc_'):
feat_preproc_grid_dict[key[2:]] = str(val).split(',')
pprint(key + " : " + val)
arg_dict[key[2:]] = val
for key in feat_preproc_grid_dict.keys():
ii = unk_list.index('--' + key)
del unk_list[ii+1]
del unk_list[ii]
if len(unk_list) > 0:
pprint("UNKNOWN ARGS (ignored)")
for key in unk_list:
pprint(key)
# Set default seed for numpy
np.random.seed(arg_dict['seed'])
# Write parsed args to plain-text file
# so we can exactly reproduce later
with open(os.path.join(arg_dict['output_path'], 'settings.txt'), 'w') as f:
for key, val in arg_dict.items():
f.write(key + ' = ' + str(val) + '\n')
pprint(key + ' = ' + str(val))
with open(os.path.join(arg_dict['output_path'], 'args.txt'), 'w') as f:
for key, val in arg_dict.items():
f.write('--' + key + ' ' + str(val) + '\n')
pprint('')
feat_path_list = [
arg_dict['dataset_path'],
arg_dict['features_path']]
pprint('[run_classifier says:] Loading dataset ...')
start_time = time.time()
feature_arr_names = arg_dict['feature_arr_names'].split(',')
pprint('feature_arr_names:')
feat_colnames_by_arr = OrderedDict()
for feat_arr_name in feature_arr_names:
pprint(feat_arr_name)
cur_feat_colnames = None
for feat_path in feat_path_list:
colname_fpath = os.path.join(
feat_path,
feat_arr_name + '_colnames.txt')
if os.path.exists(colname_fpath):
cur_feat_colnames = \
[str(feat_arr_name + ":") + s
for s in load_list_of_unicode_from_txt(colname_fpath)]
break
feat_colnames_by_arr[feat_arr_name] = cur_feat_colnames
target_arr_name = arg_dict['target_arr_name']
all_target_names = load_list_of_strings_from_txt(os.path.join(
arg_dict['dataset_path'],
target_arr_name + '_colnames.txt'))
target_names = arg_dict['target_names']
if target_names == 'all':
target_names = all_target_names
target_cols = np.arange(len(all_target_names)).tolist()
else:
target_names = target_names.split(',')
target_cols = list()
for name in target_names:
assert name in all_target_names
target_cols.append(all_target_names.index(name))
datasets_by_split = dict()
for split_name in ['train', 'valid', 'test']:
datasets_by_split[split_name] = dict()
split_dataset = datasets_by_split[split_name]
# Load Y
dense_fpath = os.path.join(
dataset_path,
target_arr_name + "_%s.npy" % split_name)
y = np.asarray(np.load(dense_fpath), order='C', dtype=np.float32) # 0/1/nan
if y.ndim < 2:
y = y[:,np.newaxis]
assert y.ndim == 2
assert y.shape[1] == len(all_target_names)
split_dataset['y'] = y[:, target_cols]
assert split_dataset['y'].shape[1] == len(target_cols)
# Load X
x_list = list()
for feat_arr_name in feature_arr_names:
for ii, feat_path in enumerate(feat_path_list):
dense_fpath = os.path.join(
feat_path,
feat_arr_name + "_%s.npy" % split_name)
sparse_fpath = os.path.join(
feat_path,
feat_arr_name + "_csr_%s.npz" % split_name)
x_cur = None
try:
if os.path.exists(sparse_fpath):
print("Here is sparse_fpath", sparse_fpath)
x_cur = load_csr_matrix(sparse_fpath)
print(x_cur)
assert np.all(np.isfinite(x_cur.data))
break
else:
x_cur = np.asarray(
np.load(dense_fpath),
order='C', dtype=np.float64)
if x_cur.ndim < 2:
x_cur = np.atleast_2d(x_cur).T
assert np.all(np.isfinite(x_cur))
break
except IOError as e:
if ii == len(feat_path_list) - 1:
# Couldn't find desired file in any feat_path
raise e
else:
# Try the next feat_path in the list
pass
if x_cur is not None:
if feat_colnames_by_arr[feat_arr_name] is not None:
feat_dim = len(feat_colnames_by_arr[feat_arr_name])
print('feat name, %s, feat_dim %d'%(feat_arr_name, feat_dim))
print('x_cur shape', x_cur.shape[1])
assert x_cur.shape[1] == feat_dim
else:
# Add dummy colnames
feat_dim = x_cur.shape[1]
n_sig_digits = np.maximum(
3, int(np.ceil(np.log10(feat_dim))))
fmt_str = "%s_%0" + str(n_sig_digits) + "d"
feat_colnames_by_arr[feat_arr_name] = [
fmt_str % (feat_arr_name, fid)
for fid in range(feat_dim)]
x_list.append(x_cur)
if isinstance(x_list[0], np.ndarray):
split_dataset['x'] = np.hstack(x_list)
else:
split_dataset['x'] = scipy.sparse.hstack(x_list, format='csr')
#Use only a fraction of the training dataset if specified
frac_labels_train = arg_dict['frac_labels_train']
if split_name == 'train' and frac_labels_train < 1.0:
# Same random seed taken from bow_dataset.py
data_prng = np.random.RandomState(int(42))
n_rows = y.shape[0]
#Note: does not handle truly missing labels
indexed_rows = np.arange(n_rows)
shuffled_rows = data_prng.permutation(indexed_rows)
n_visible = int(np.ceil(frac_labels_train*n_rows))
visible_rows = shuffled_rows[:n_visible]
split_dataset['x'] = split_dataset['x'][visible_rows, :]
split_dataset['y'] = split_dataset['y'][visible_rows, :]
assert split_dataset['x'].ndim == 2
assert split_dataset['x'].shape[0] == split_dataset['y'].shape[0]
assert (
isinstance(split_dataset['x'], np.ndarray)
or isinstance(split_dataset['x'], scipy.sparse.csr_matrix)
)
if split_name == 'train':
# Flatten feat colnames into single list
feat_colnames = sum(feat_colnames_by_arr.values(), [])
assert isinstance(feat_colnames, list)
assert len(feat_colnames) == split_dataset['x'].shape[1]
if len(feat_colnames) > 10:
pprint(
'x colnames: %s ... %s' % (
' '.join(feat_colnames[:5]),
' '.join(feat_colnames[-5:])))
else:
pprint('x colnames: %s' % ' '.join(feat_colnames))
pprint('y colnames: %s' % ' '.join(target_names))
pprint('---- %5s dataset summary' % split_name)
pprint('%9d total examples' % y.shape[0])
pprint('y : %d x %d targets' % split_dataset['y'].shape)
pprint('x : %d x %d features' % split_dataset['x'].shape)
for c in range(len(target_names)):
y_c = split_dataset['y'][:,c]
nan_bmask = np.isnan(y_c)
pos_bmask = y_c == 1
neg_bmask = y_c == 0
pprint('target %s :' % target_names[c])
pprint(' %6d pos examples | %.3f' % (np.sum(pos_bmask), calcfrac(pos_bmask)))
pprint(' %6d neg examples | %.3f' % (np.sum(neg_bmask), calcfrac(neg_bmask)))
pprint(' %6d NaN examples | %.3f' % (np.sum(nan_bmask), calcfrac(nan_bmask)))
assert nan_bmask.sum() + pos_bmask.sum() + neg_bmask.sum() == neg_bmask.size
elapsed_time = time.time() - start_time
pprint('[run_classifier says:] dataset loaded after %.2f sec.' % elapsed_time)
n_cols = len(target_names)
for c in range(n_cols):
pprint('[run_classifier says:] train for target %s' % target_names[c])
train_and_eval_clf_with_best_params_via_grid_search(
arg_dict['classifier_name'],
datasets_by_split=datasets_by_split,
y_col_id=c,
y_orig_col_id=all_target_names.index(target_names[c]),
y_col_name=target_names[c],
feat_colnames=feat_colnames,
feat_preproc_grid_dict=feat_preproc_grid_dict,
output_path=arg_dict['output_path'],
max_grid_search_steps=arg_dict['max_grid_search_steps'],
class_weight_opts=arg_dict['class_weight_opts'],
c_logspace_arg_str=arg_dict['c_logspace_arg_str'],
random_state=arg_dict['seed'],
seed_bootstrap=arg_dict['seed_bootstrap'],
n_bootstraps=arg_dict['n_bootstraps'],
bootstrap_stratify_pos_and_neg=arg_dict['bootstrap_stratify_pos_and_neg'],
)
elapsed_time = time.time() - start_time
pprint('[run_classifier says:] target %s completed after %.2f sec' % (target_names[c], elapsed_time))
def calc_calibration_info(clf, x, y, bins=5):
assert len(clf.classes_) == 2
assert clf.classes_[0] == 0
assert clf.classes_[1] == 1
y_proba = clf.predict_proba(x)
if y_proba.ndim > 1:
assert y_proba.shape[1] == 2
y_proba = y_proba[:, 1]
info_per_bin = calc_binary_clf_calibration_per_bin(
y, y_proba,
bins=bins)
return info_per_bin
def calcfrac(bmask):
return np.sum(bmask) / float(bmask.size)
def default_param_grid(classifier_name, c_logspace_arg_str='-6,4,6', **kwargs):
C_range = np.logspace(*map(float, c_logspace_arg_str.split(',')))
if classifier_name == 'logistic_regression':
return OrderedDict([
('penalty', ['l2', 'l1']),
('class_weight', [None]),
('C', C_range),
('thr_', [0.5]),
])
elif classifier_name == 'extra_trees':
return OrderedDict([
('class_weight', [None]),
('n_estimators', np.asarray([16, 64, 256])),
('max_features', np.asarray([0.04, 0.16, 0.64])),
('min_samples_leaf', np.asarray([4, 16, 64, 256])), # bigger seems to be better
('thr_', [0.5]),
])
elif classifier_name == 'svm_with_linear_kernel':
return OrderedDict([
('kernel', ['linear']),
('C', C_range),
('class_weight', [None]),
('probability', [False]),
])
elif classifier_name == 'svm_with_rbf_kernel':
return OrderedDict([
('kernel', ['rbf']),
('C', C_range),
('gamma', np.logspace(-6, 6, 5)),
('class_weight', [None]),
('probability', [False]),
])
elif classifier_name == 'k_nearest_neighbors':
return OrderedDict([
('n_neighbors', [4, 16, 32, 64]),
('metric', ['euclidean', 'manhattan']),
('weight', ['uniform', 'distance']),
('algorithm', ['brute']),
])
elif classifier_name == 'mlp':
return OrderedDict([
#('norm', ['l1', 'none']),
('hidden_layer_sizes', [(16,), (64,), (256,), (1024,)]),  # single hidden layer of the given width
('solver', ['adam']),
('alpha', np.logspace(-6, 0, 3)),
('learning_rate_init', np.asarray([0.01, 0.1])),
('activation', ['relu']),
('batch_size', [200]),
('early_stopping', [True]),
])
else:
raise ValueError("Unrecognized: " + classifier_name)
def make_constructor_and_evaluator_funcs(
classifier_name,
n_bootstraps=5000,
seed_bootstrap=None,
bootstrap_stratify_pos_and_neg=True,
):
def calc_auc_score(clf, x, y):
try:
yscore = clf.decision_function(x)
except AttributeError as e:
yscore = clf.predict_proba(x)
if yscore.ndim > 1:
assert yscore.shape[1] == 2
yscore = yscore[:, 1]
assert y.ndim == 1
assert yscore.ndim == 1
#return roc_auc_score(y, yscore)
return average_precision_score(y, yscore)
def calc_f1_conf_intervals(
clf, x, y, ci_tuples=[(2.5,97.5), (10,90)], pos_label=1):
yhat = clf.predict(x)
assert y.ndim == 1
assert yhat.ndim == 1
n_pos = np.sum(y == 1)
n_neg = np.sum(y == 0)
if n_pos > 1.2 / 2.0 * (n_pos + n_neg):
raise Warning("Positive examples are much more common")
def my_f1_score(ya, yb):
return f1_score(ya, yb, pos_label=pos_label)
return calc_binary_clf_metric_with_ci_via_bootstrap(
y_pred=yhat,
y_true=y,
metric_func=my_f1_score,
ci_tuples=ci_tuples,
n_bootstraps=n_bootstraps,
seed=seed_bootstrap,
stratify_pos_and_neg=bootstrap_stratify_pos_and_neg)
def calc_auc_conf_intervals(clf, x, y, ci_tuples=[(2.5,97.5), (10,90)]):
try:
yscore = clf.decision_function(x)
except AttributeError as e:
yscore = clf.predict_proba(x)
if yscore.ndim > 1:
assert yscore.shape[1] == 2
yscore = yscore[:, 1]
assert y.ndim == 1
assert yscore.ndim == 1
return calc_binary_clf_metric_with_ci_via_bootstrap(
y_pred=yscore,
y_true=y,
metric_func=roc_auc_score,
ci_tuples=ci_tuples,
n_bootstraps=n_bootstraps,
seed=seed_bootstrap,
stratify_pos_and_neg=bootstrap_stratify_pos_and_neg)
def calc_accuracy_score(clf, x, y):
yhat = clf.predict(x)
assert y.ndim == 1
assert yhat.ndim == 1
return np.sum(y == yhat) / float(y.size)
def calc_f1_score(clf, x, y):
yhat = clf.predict(x)
assert y.ndim == 1
assert yhat.ndim == 1
return f1_score(y, yhat, pos_label=clf.classes_[1])
def make_clf_report(clf, x, y, header=''):
r_str = header
r_str += make_confusion_matrix_report(clf, x, y)
r_str += u"acc %.4f\n" % calc_accuracy_score(clf, x, y)
r_str += u" f1 %.4f\n" % calc_f1_score(clf, x, y)
r_str += u"auc %.4f\n" % calc_auc_score(clf, x, y)
r_str += make_calibration_report(clf, x, y)
return r_str
def make_confusion_matrix_report(clf, x, y):
assert len(clf.classes_) == 2
assert clf.classes_[0] == 0
assert clf.classes_[1] == 1
y_pred = clf.predict(x)
cm = sk_confusion_matrix(y, y_pred)
cm = pd.DataFrame(data=cm, columns=[0, 1], index=[0, 1])  # api: pandas.DataFrame
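# Illustrative sketch, not from the classifier script above: wrapping a confusion matrix in
# a labelled DataFrame, as make_confusion_matrix_report does. The counts are made up.
import numpy as np
import pandas as pd

cm_example = np.array([[50, 10],
                       [5, 35]])
cm_df = pd.DataFrame(data=cm_example, columns=[0, 1], index=[0, 1])
cm_df.index.name = 'true'
cm_df.columns.name = 'pred'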
import pandas as pd
import tkinter as tk
from tkinter import filedialog
import tkinter.font as font
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from secrets import home_email, password
from email import encoders
import os
#Get the email data
class email_client(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.email_list = []
self.fail_list = []
self.attach_file = None
self.create_widgets()
def create_widgets(self):
self.import_Frame = tk.Frame(self)
self.import_Frame.pack()
self.import_label = tk.Label(self.import_Frame, text="Select File", width=25)
self.import_label.pack(side="left")
self.import_button = tk.Button(self.import_Frame, text="Import Email List", width=25, command=self.import_file)
self.import_button.pack(side="right")
self.attach_Frame = tk.Frame(self)
self.attach_Frame.pack()
self.attach_label = tk.Label(self.attach_Frame, text="Attach File", width=25)
self.attach_label.pack(side="left")
self.attach_button = tk.Button(self.attach_Frame, text="Attach File", width=25, command=self.select_attach)
self.attach_button.pack(side="right")
self.subject_Frame = tk.Frame(self)
self.subject_Frame.pack()
self.subject_label = tk.Label(self.subject_Frame, text="Subject", width=25)
self.subject_label.pack(side="left")
self.subject_text = tk.Entry(self.subject_Frame, width=30)
self.subject_text.pack(side="right")
self.body_Frame = tk.Frame(self)
self.body_Frame.pack()
self.body_label = tk.Label(self.body_Frame, text="Body", width=25)
self.body_label.pack(side="left")
self.body_text = tk.Text(self.body_Frame, height=6, width=23)
self.body_text.pack(side="right")
self.button_Frame = tk.Frame(self)
self.button_Frame.pack()
self.send_button = tk.Button(self.button_Frame, text="Send Emails", width=25, command=self.send_emails)
self.send_button.pack(side="left")
self.quit_button = tk.Button(self.button_Frame, text="Quit", width=25, command=self.master.destroy)
self.quit_button.pack(side="right")
def import_file(self):
self.filename = filedialog.askopenfilename(initialdir="/Documents", title="Select A file", filetypes= (("xlsx files", "*.xlsx"), ("All Files", "*.*") ))
try:
data = pd.read_excel(self.filename)  # api: pandas.read_excel
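# Illustrative sketch, not part of the Tk email client above: pandas.read_excel loads an
# .xlsx workbook into a DataFrame (an engine such as openpyxl must be installed). An
# in-memory workbook stands in for the user-selected file here.
import io
import pandas as pd

buf = io.BytesIO()
pd.DataFrame({'name': ['A'], 'email': ['a@example.com']}).to_excel(buf, index=False)
buf.seek(0)
contacts_example = pd.read_excel(buf)   # first sheet by default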
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates HTML to display confidence interval nicely for DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from IPython.display import display
from IPython.display import HTML
import pandas as pd
import six
from six.moves import range
CSS = '''
/* Make the table scrollable */
#meterstick-container {
max-height: 700px;
overflow-y: auto;
}
#meterstick td {
border-color: lightgrey;
border-style: solid;
border-width: thin;
text-align: center;
padding: 0;
}
/* When scrolling, the header sticks and keeps its white background */
#meterstick th {
background: white;
border-color: lightgrey;
border-bottom-color: black;
border-style: solid;
border-width: thin;
border-top-width: 0;
position: sticky;
top: 0;
}
/* Wrap the long column name. */
th {
max-width: 120px;
white-space: normal !important;
word-wrap: break-word;
}
.ci-display-cell {
height: 100%;
padding: 5px;
}
/* Formats Dimension column if there is one. */
.ci-display-dimension {
color: green;
padding: 2px;
}
/* Renders the experiment id in blue. */
.ci-display-experiment-id {
color: #15c;
}
/* Break line in a flex display. */
.ci-display-flex-line-break {
width: 100%;
}
/* Renders the cells with positive confidence interval to green. */
.ci-display-good-change {
background-color: rgb(221,255,187);
color: green;
}
/* Renders the cells with negative confidence interval to red. */
.ci-display-bad-change {
background-color: rgb(255,221,187);
color: red;
}
.ci-display-ratio {
font-size: 120%; /* Renders the ratio value in larger font. */
}
.ci-display-ci-range {
white-space: nowrap; /* Don't break line in the middle of CI range. */
}
''' # You can overwrite this from outside with custom CSS.
# Line breaker in a flex display.
LINE_BREAK = '<div class="ci-display-flex-line-break"></div>'
def _sorted_long_to_wide(df, dims, sort_by):
"""Returns a df in wide format for metrics.
The input df is in so-called long, or tidy, format, i.e., each
row is one metric for one slice. This function transforms the df to a wide
format, where all metrics for one slice are collected into one row. It also
does some extra things:
1. Sorts the df,
2. Collects metric information (value, ratio, ci_lower, ci_upper) to a tuple,
3. Drops columns no more needed.
Args:
df: A dataframe in long format, could just be metrics_types.as_dataframe().
dims: The column name(s) of the slicing dimensions; can be a list or a string.
sort_by: In the form of [{'column': 'CI-lower', 'ascending': False},
{'column': 'Dim_2', 'order': ['Logged-in', 'Logged-out']}]. The 'column'
is the column to sort by, 'order' is optional and for categorical column,
and 'ascending' is optional and default True. The result will be displayed
in the order specified by sort_by from top to bottom.
Returns:
A df containing enough information in wide format.
"""
default_dims = dims + [
'Control_Id', 'Is_Control', 'Experiment_Id', 'Description'
]
existing_index_cols = [x for x in default_dims if x in df]
if 'Control_Id' in df and all(pd.isnull(df.Control_Id)):
# All None column makes pivot_table fail.
df.drop(columns=['Control_Id'], inplace=True)
existing_index_cols = [c for c in existing_index_cols if c != 'Control_Id']
if not existing_index_cols:
df['_placeholder'] = 42
existing_index_cols = ['_placeholder']
val_cols = ['Value', 'Ratio', 'CI_Lower', 'CI_Upper']
df = df[existing_index_cols + val_cols + ['Metric']]
# Object columns will get dropped during the unstack().
df = df.astype({c: 'float64' for c in val_cols})
for col in existing_index_cols:
# Missing category will still appear after groupby(). We need to drop them.
if isinstance(df[col].dtypes, pd.CategoricalDtype):
df[col] = pd.Categorical(
df[col], [c for c in df[col].cat.categories if c in df[col].unique()],
ordered=df[col].cat.ordered)
# Spread metrics in Metric column to individual columns, i.e., long to wide.
# pivot_table() doesn't work if there's NA.
# https://github.com/pandas-dev/pandas/issues/18030#issuecomment-340442023
df = df.groupby(existing_index_cols + ['Metric']).agg('mean').unstack(-1)
if not sort_by:
sorting_cols = existing_index_cols
ascending = [s != 'Is_Control' for s in sorting_cols]
else:
sorting_cols = []
ascending = []
for s in sort_by:
col = s['column']
sorting_cols.append(col)
ascending.append(s.get('ascending', True))
if 'order' in s:
if col in df:
df[col] = pd.Categorical(df[col], s['order'], ordered=True)
else:
df.reset_index(col, inplace=True)
df[col] = pd.Categorical(df[col], s['order'], ordered=True)
df.set_index(col, append=True, inplace=True)
if sorting_cols:
df = df.sort_values(sorting_cols, ascending=ascending)
# Collects [Value, Ratio, CI_Lower, CI_Upper] for each Metric * slice. val_col
# might be dropped during pivot b/c of na, so we make a dict first.
df = df.groupby(level=1, axis=1).apply(
lambda x: x.droplevel(1, 1).apply(lambda row: row.to_dict(), 1))
df = df.applymap(lambda x: [x.get(k) for k in val_cols]).reset_index()
if '_placeholder' == existing_index_cols[0]:
df.drop(columns='_placeholder', inplace=True)
return df
def _merge_dimensions(df, dims):
"""Merge dimension info columns to a 'Dimensions' column."""
agg_cols = dims + ['Experiment_Id', 'Is_Control', 'Description']
agg_cols = [c for c in agg_cols if c in df]
if agg_cols:
df['Dimensions'] = df[agg_cols].apply(lambda row: row.to_dict(), axis=1)
df.drop(columns=agg_cols + ['Control_Id'], inplace=True, errors='ignore')
# Make 'Dimensions' the first column.
# df[['Dimensions'] + df.columns[:-1].tolist()] will fail when metric aren't
# in the same type.
# https://stackoverflow.com/questions/45175041/reorder-pandas-dataframe-columns-with-mixed-tuple-and-string-columns
dim_vals = df['Dimensions']
del df['Dimensions']
df.insert(0, 'Dimensions', dim_vals)
return df
def _pre_aggregate_df(df,
dims,
aggregate_dimensions,
show_control,
ctrl_id,
sort_by=None,
auto_decide_control_vals=False,
auto_add_description=True):
"""Process a long-format df to an appropriate format for display.
Args:
df: A dataframe similar to the one returned by metrics_types.as_dataframe().
dims: The column name(s) of the slicing dimensions; can be a list or a string.
aggregate_dimensions: If True, all dimension columns are collected into a
'Dimensions' column, and original dimension columns are dropped.
show_control: If False, only ratio values in non-control rows are shown.
ctrl_id: The control experiment id(s). For single control case, it can be
basically any type that can be used as an experiment key except dict. For
multiple controls, it should be a dict, with keys being control ids,
values being list of corresponding experiment id(s).
sort_by: In the form of [{'column': 'CI-lower', 'ascending': False},
{'column': 'Dim_2', 'order': ['Logged-in', 'Logged-out']}]. The 'column'
is the column to sort by, 'order' is optional and for categorical column,
and 'ascending' is optional and default True. The result will be displayed
in the order specified by sort_by from top to bottom.
auto_decide_control_vals: By default, if users want to see control
experiments, df needs to have rows for control, but the 'Value' there
is supposed to be equal to the 'Control_Value' column in experiment rows.
So if control rows are missing, we can use 'Control_Value' column to fill
them. The problem is that when there are multiple experiments, their
Control_Values might be different (though they shouldn't be). In that case
we raise a warning and skip. Also, if the user already provides control rows for
certain slices, we won't fill those slices.
auto_add_description: If add Control/Not Control as descriptions.
Returns:
A pandas dataframe with stylized content for display. The display effect is
similar to tge_estimation.display().
Raises:
ValueError: If metrics is not an instance of MetricsTablesByExperiments,
MetricsTable, or MetricsPerSlice.
"""
if 'CI_Upper' not in df or 'CI_Lower' not in df:
df['CI_Upper'] = df['Ratio'] + (df['CI_Range'] / 2)
df['CI_Lower'] = df['Ratio'] - (df['CI_Range'] / 2)
df = _add_is_control_and_control_id(df, ctrl_id)
if auto_add_description:
is_ctrl = df['Is_Control'] if 'Is_Control' in df else None
if is_ctrl is not None: # ctrl id could be 0 or ''.
is_ctrl = ['Control' if x else 'Not Control' for x in is_ctrl]
if 'Description' not in df:
df['Description'] = is_ctrl
else:
df['Description'] = df['Description'].where(
df['Description'].astype(bool), is_ctrl) # Only fills empty cells
if show_control:
if 'Is_Control' in df:
# When Ratio is None, CI won't be displayed. This is intended for control.
df.loc[df['Is_Control'], 'Ratio'] = None
else:
df['Value'] = None
if 'Is_Control' in df:
# Make a copy to avoid "A value is trying to be set on a copy of a slice
# from a DataFrame." warning.
df = df[~df['Is_Control']].copy()
if auto_decide_control_vals:
df = add_control_rows(df, dims)
pre_agg_df = _sorted_long_to_wide(df, dims, sort_by)
if aggregate_dimensions:
pre_agg_df = _merge_dimensions(pre_agg_df, dims)
return pre_agg_df
def _div(s, class_name=None):
if class_name:
return '<div class="%s">%s</div>' % (class_name, s)
return '<div>%s</div>' % s
def _span(s, class_name=None):
if class_name:
return '<span class="%s">%s</span>' % (class_name, s)
return '<span>%s</span>' % s
class MetricFormatter(object):
"""A formatter to highlight significant metric change.
Concatenates 'Value', 'Ratio', 'CI_Lower', 'CI_Upper' columns in df to a
stylized form which can be rendered to HTML directly later. Cells with
positive CI change are rendered green, with negative CI change are rendered
red.
Attributes:
metric_formats: A dict specifying how to display metric values. Keys can be
'Value' and 'Ratio'. Values can be 'absolute', 'percent', 'pp' or a
formatting string. For example, '{:.2%}' would have the same effect as
'percent'. By default, Value is in absolute form and Ratio in percent.
if_flip_color: A boolean indicating if to flip green/red coloring scheme.
hide_null_ctrl: If to hide control value or use '-' to represent it when it
is null,
form_lookup: A dict to look up formatting str for the display.
unit_lookup: A dict to look up the unit to append to numbers in display.
Returns:
A string specifying a named <div> containing concatenated values. Div may
include html classes used to style with CSS.
"""
def __init__(self,
metric_formats=None,
if_flip_color=None,
hide_null_ctrl=False):
metric_formats = metric_formats or {}
metric_formats.setdefault('Value', 'absolute')
metric_formats.setdefault('Ratio', 'absolute')
self.if_flip_color = if_flip_color
self.hide_null_ctrl = hide_null_ctrl
self.metric_formats = metric_formats
self.form_lookup = {
'percent': '{:.2f}',
'absolute': '{:.4f}',
'pp': '{:.2f}'
}
self.unit_lookup = {'percent': '%', 'pp': 'pp'}
def _format_value(self, val, form, is_ci=False):
"""Formats val in the required form.
Args:
val: A single value or a list of [ci_lower, ci_upper].
form: 'Absolute', 'percent', 'pp' or a formatting string.
is_ci: If val is a list for CI values.
Returns:
A formatted string for display.
"""
val_format = self.form_lookup.get(form, form)
unit = self.unit_lookup.get(form, '')
if isinstance(val, str):
return val + ' ' + unit
if not is_ci:
if pd.isnull(val):
return 'N/A'
res = val_format.format(val) + unit
else:
ci_lower = 'N/A' if pd.isnull(val[0])  # api: pandas.isnull
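# Illustrative sketch, not from the formatter above: pandas.isnull flags None and NaN
# scalars as well as missing entries inside array-likes. The values below are made up.
import numpy as np
import pandas as pd

pd.isnull(np.nan)                   # True
pd.isnull(None)                     # True
pd.isnull([1.0, np.nan, None])      # array([False,  True,  True])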
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import logging
logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def read_data_to_df(data_path: str, **read_data_options):
"""
read data depending on its extension and convert it to a pandas dataframe
"""
file_ext = data_path.split('.')[-1]
if file_ext == 'csv' or file_ext == 'txt':
return pd.read_csv(data_path, **read_data_options) if read_data_options else pd.read_csv(data_path)
elif file_ext == 'xlsx':
return pd.read_excel(data_path, **read_data_options) if read_data_options else pd.read_excel(data_path)
elif file_ext == 'json':
return pd.read_json(data_path, **read_data_options)  # api: pandas.read_json
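# Illustrative sketch, not part of read_data_to_df above: pandas.read_json parses JSON into
# a DataFrame; records-style JSON maps one object to one row. The payload below is made up.
import io
import pandas as pd

json_text = '[{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]'
json_df_example = pd.read_json(io.StringIO(json_text))   # columns ['a', 'b'], two rows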
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 14:00:26 2019
@author: <NAME>
"""
import pandas as pd
import time
from datetime import timedelta
import datetime
from pandas import *
import random
data = pd.read_csv('df_Rhythm4analyze_o_37852_1558704625828706.csv')  # api: pandas.read_csv
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/2 21:08
Desc: 10jqka (Tonghuashun) - Data Center - Technical stock screening
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
获取 JS 文件的内容
:param file: JS 文件名
:type file: str
:return: 文件内容
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_rank_cxg_ths(symbol: str = "创月新高") -> pd.DataFrame:
"""
10jqka (Tonghuashun) - Data Center - Technical stock screening - new highs
http://data.10jqka.com.cn/rank/cxg/
:param symbol: choice of {"创月新高", "半年新高", "一年新高", "历史新高"}
:type symbol: str
:return: new-high data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新高": "4",
"半年新高": "3",
"一年新高": "2",
"历史新高": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期高点", "前期高点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期高点日期"] = pd.to_datetime(big_df["前期高点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期高点"] = pd.to_numeric(big_df["前期高点"])
return big_df
def stock_rank_cxd_ths(symbol: str = "创月新低") -> pd.DataFrame:
"""
10jqka (Tonghuashun) - Data Center - Technical stock screening - new lows
http://data.10jqka.com.cn/rank/cxd/
:param symbol: choice of {"创月新低", "半年新低", "一年新低", "历史新低"}
:type symbol: str
:return: new-low data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新低": "4",
"半年新低": "3",
"一年新低": "2",
"历史新低": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期低点", "前期低点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期低点日期"] = pd.to_datetime(big_df["前期低点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期低点"] = pd.to_numeric(big_df["前期低点"])
return big_df
def stock_rank_lxsz_ths() -> pd.DataFrame:
"""
10jqka (Tonghuashun) - Data Center - Technical stock screening - consecutive rising days
http://data.10jqka.com.cn/rank/lxsz/
:return: consecutive-rise data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])  # api: pandas.to_numeric
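# Illustrative sketch, not from the akshare scraper above: after stripping '%', the percent
# columns are converted with pandas.to_numeric; errors='coerce' would turn bad cells into NaN.
# The values below are made up.
import pandas as pd

pct_example = pd.Series(['3.25', '-1.10', '--'])
pd.to_numeric(pct_example, errors='coerce')   # 3.25, -1.10, NaN (float64)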
import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
"""
Returns historical bar data for an iterable of ticker symbols;
the function iterates over symbols one at a time, e.g. symbols = ["MSFT", "AMZN", "GOOG"].
"""
df_data_tickers = {}
for symbol in symbols:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start, "limit" :limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token":'', "symbol":symbol}
while True:
r = requests.get(bar_url, headers = headers, params = params)
r = r.json()
if r["next_page_token"] == None:
data["bars"]+=r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"]+=r["bars"]
data["next_page_token"] = r["next_page_token"]
df_data = pd.DataFrame(data["bars"])
df_data.rename({"t":"time","o":"open","h":"high","l":"low","c":"close","v":"volume"},axis=1, inplace=True)
df_data["time"] = pd.to_datetime(df_data["time"])
df_data.set_index("time",inplace=True)
df_data.index = df_data.index.tz_convert("America/Indiana/Petersburg")
df_data_tickers[symbol] = df_data
return df_data_tickers
def get_historical_data(ticker_list, start_date, end_date=None, limit=10000, timeframe="1Day"):
"""
Returns historical bar data for a list of ticker symbols,
e.g. ticker_list = ["MSFT", "AMZN", "GOOG"].
* timeframe - Timeframe for the aggregation. Available values are: `1Min`, `1Hour`, `1Day`
https://alpaca.markets/docs/api-documentation/api-v2/market-data/alpaca-data-api-v2/historical/#bars
"""
df_data_tickers = {}
for symbol in ticker_list:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start_date, "end": end_date, "limit": limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token": '', "symbol": symbol}
# r = requests.get(bar_url, headers=headers, params=params)
# r = r.json()
# data["bars"] += r["bars"]
while True:
r = requests.get(bar_url, headers=headers, params=params)
r = r.json()
try:
if r["next_page_token"] == None:
data["bars"] += r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"] += r["bars"]
data["next_page_token"] = r["next_page_token"]
except:
break
# Create a DataFrame for the data["bars"] of each stock
df_data = pd.DataFrame(data["bars"])  # api: pandas.DataFrame
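# Illustrative sketch, not part of the Alpaca helper above: the accumulated bars are a list
# of dicts, which pandas.DataFrame turns into one row per bar. The bar values are made up.
import pandas as pd

bars_example = [
    {"t": "2021-01-04T14:30:00Z", "o": 100.0, "h": 101.5, "l": 99.5, "c": 101.0, "v": 1200},
    {"t": "2021-01-04T15:30:00Z", "o": 101.0, "h": 102.0, "l": 100.5, "c": 101.8, "v": 900},
]
bars_df = pd.DataFrame(bars_example)   # columns t/o/h/l/c/v, one row per bar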
from flask import Flask
from flask import request
from flask_cors import CORS
import pymongo
from flask_pymongo import PyMongo
import json
from pydash import _
import numpy as np
import pandas as pd
STATIC_FOLDER = 'server/static'
# STATIC_FOLDER = '../client/dist'
TEMPLATE_FOLDER = '../client/dist'
app = Flask(__name__, static_url_path='', static_folder=STATIC_FOLDER, template_folder=TEMPLATE_FOLDER)
app.config.from_object('config')
CORS(app)
# app.config["MONGO_URI"] = "mongodb://localhost:27017/proctoring"
# mongo = PyMongo(app)
abandon_student_list = ['128126_A', '128126_B', '141137_B']
from server.routes import index
def get_one_axis_mouse(x, axis):
return {
"timestamp": x["d_timestamp"],
axis: x[f"screen{axis}_scaled"]
}
def get_one_axis_bbox(x, axis):
return {
"timestamp": x["d_timestamp"],
f"{axis}_min": x[f"{axis}_min_scaled"],
f"{axis}_max": x[f"{axis}_max_scaled"],
}
def get_one_axis_headpose(x, axis):
return {
"timestamp": x["d_timestamp"],
axis: x[f"{axis}_scaled"],
}
with open(STATIC_FOLDER+"/mouse_raw_data_anony.json") as f:
json_mouse_raw_data = json.load(f)
with open(STATIC_FOLDER+"/mouse_special_event_anony.json") as f:
json_mouse_special_event = json.load(f)
with open(STATIC_FOLDER+"/student_result_list_anony.json") as f:
json_student_result_list = json.load(f)
with open(STATIC_FOLDER+"/bbox_raw_data_anony.json") as f:
json_bbox_raw_data = json.load(f)
with open(STATIC_FOLDER+"/headpose_raw_data_anony.json") as f:
json_headpose_raw_data = json.load(f)
with open(STATIC_FOLDER+"/question_time_length_anony.json") as f:
json_question_time_length = json.load(f)
with open(STATIC_FOLDER+"/mouse_raw_data_replay_anony.json") as f:
json_mouse_raw_data_replay = json.load(f)
# student_id: dong_A1; question_id: mc_1; axis: X or Y;
@app.route("/api/mouse_raw_data/<string:student_id>/<string:question_id>/<string:axis>", methods=['POST','GET'])
def mouse_raw_data(student_id, question_id, axis):
return {"data": _.map(json_mouse_raw_data[student_id][question_id], lambda x: get_one_axis_mouse(x, axis))}
@app.route("/api/mouse_raw_data_replay/<string:student_id>/<string:question_id>", methods=['POST','GET'])
def mouse_raw_data_replay(student_id, question_id):
data = json_mouse_raw_data_replay[student_id][question_id]
tmp = []
for item in data:
tmp.append([item["x"], item["y"], item["d_timestamp"], item["clientX"], item["clientY"]])
return {"data": tmp}
# student_id: dong_A1; question_id: mc_1; axis: X or Y;
@app.route("/api/bbox_raw_data/<string:student_id>/<string:question_id>/<string:axis>", methods=['POST','GET'])
def bbox_raw_data(student_id, question_id, axis):
return {"data": _.map(json_bbox_raw_data[student_id][question_id], lambda x: get_one_axis_bbox(x, axis))}
# student_id: dong_A1; question_id: mc_1; axis: yaw or pitch;
@app.route("/api/headpose_raw_data/<string:student_id>/<string:question_id>/<string:axis>", methods=['POST','GET'])
def headpose_raw_data(student_id, question_id, axis):
return {"data": _.map(json_headpose_raw_data[student_id][question_id], lambda x: get_one_axis_headpose(x, axis))}
@app.route("/api/mouse_special_event/<string:student_id>/<string:question_id>", methods=['POST','GET'])
def mouse_special_event(student_id, question_id):
return {"data": json_mouse_special_event[student_id][question_id]}
@app.route("/api/student_result_list/<string:student_id>", methods=['POST','GET'])
def student_result_list(student_id):
return {"data": json_student_result_list[student_id]}
@app.route("/api/all_result_list", methods=['POST','GET'])
def all_result_list():
all_result_list = []
for (k, v) in json_student_result_list.items():
if k in abandon_student_list:
continue
v['student_id'] = k
all_result_list.append(v)
return {'data': all_result_list}
@app.route("/api/question_time_length/<string:student_id>", methods=['POST','GET'])
def question_time_length(student_id):
return {"data":[{"question_id": k, "time_length": v} for (k, v) in json_question_time_length[student_id].items()]}
@app.route("/api/average_question_time_length/<string:question_set>", methods=['POST','GET'])
def average_question_time_length(question_set):
all_question_time_length = []
list_average_question_time_length = []
list_question_id = ['mc_1', 'mc_2', 'mc_3', 'mc_4', 'mc_5', 'mc_6', 'mc_7', 'mc_8', 'mc_9', 'mc_10', 'sa_1', 'sa_2','sa_3', 'sa_4']
for question_id in list_question_id:
list_question_time_length = []
for (student_id, i) in json_question_time_length.items():
if student_id[-1]!=question_set or student_id in abandon_student_list:
continue
if question_id in i.keys():
list_question_time_length.append(i[question_id])
all_question_time_length.append(i[question_id])
else:
list_question_time_length.append(0)
list_average_question_time_length.append(
{
'question_id': question_id,
'average_time_length': float(np.mean(np.array(list_question_time_length)))
}
)
list_average_question_time_length.append(
{
'question_id': "min",
'average_time_length': float(np.min(np.array(all_question_time_length)))
}
)
list_average_question_time_length.append(
{
'question_id': "max",
'average_time_length': float(np.max(np.array(all_question_time_length)))
}
)
return {"data": list_average_question_time_length}
# student_id: dong_A1; question_id: mc_1; axis: X or Y; for headpose, X->yaw, Y->pitch
@app.route("/api/other_question_aggregate_data/<string:student_id>/<string:question_id>/<string:axis>/<int:bin_number>", methods=['POST','GET'])
@app.route("/api/other_question_aggregate_data/<string:student_id>/<string:question_id>/<string:axis>", methods=['POST','GET'])
def other_question_aggregate_data(student_id, question_id, axis, bin_number=10):
all_histo_mouse = np.zeros((len(json_mouse_raw_data[student_id].keys())-1, bin_number))
all_histo_bbox_max = np.zeros((len(json_mouse_raw_data[student_id].keys())-1, bin_number))
all_histo_bbox_min = np.zeros((len(json_mouse_raw_data[student_id].keys())-1, bin_number))
all_histo_headpose = np.zeros((len(json_mouse_raw_data[student_id].keys())-1, bin_number))
mouse_count = 0
bbox_max_count = 0
bbox_min_count = 0
headpose_count = 0
idx = 0
for (question_id_, j) in json_mouse_raw_data[student_id].items():
if question_id_ == question_id:
continue
try:
histo_mouse = np.histogram(pd.DataFrame.from_records(json_mouse_raw_data[student_id][question_id_]))
#!/usr/bin/env python
# coding: utf-8
# # Data Preprocessing
# ### Importing the libraries
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# ### Reading the dataset
# In[ ]:
dataset = pd.read_csv('startups.csv')
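# A typical next step (sketch, not part of the original notebook; assumes the usual startups
# layout with the target variable in the last column):
# X = dataset.iloc[:, :-1].values
# y = dataset.iloc[:, -1].values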
import os
import sys
import torch
import numpy as np
from BrainMaGe.models.networks import fetch_model
from pathlib import Path
import matplotlib.pyplot as plt
from compare_utils import (
postprocess_prediction,
postprocess_save_output,
postprocess_output,
dice,
get_mask_image,
get_input_image
)
from tqdm import tqdm
import pandas as pd
from pathlib import Path
from timeit import default_timer as timer
from datetime import datetime
from openvino.inference_engine import IECore
brainmage_root = Path('../')
# dataset_csv = 'nfbs-dataset-test-1.csv'
# dataset_csv = 'nfbs-dataset-preprocessed.csv'
# dataset_csv = 'nfbs-dataset-test-preprocessed.csv'
dataset_csv = 'nfbs-dataset-preprocessed.csv'
dataset_csv = 'nfbs-dataset-test-preprocessed.csv'
# For NFBS dataset - 0th col is sub-id, 1st col is input path, 2nd col is mask_path
sub_idx = 0
input_path_idx = 1
mask_path_idx = 2
# dataset_csv = 'upenn-baseline-dataset.csv'
# dataset_csv = 'upenn-baseline-dataset-test.csv'
# dataset_csv = 'upenn-baseline-dataset-test-10.csv'
# dataset_csv = 'upenn-baseline-dataset-test-2.csv'
# # For UPENN-Baseline dataset - 0th col is sub-id, 3rd col is input path, 2nd col is mask_path
# sub_idx = 0
# input_path_idx = 3
# mask_path_idx = 2
pt_output_path = 'pt-outfile' # PyTorch output file
ov_output_path = 'ov-outfile' # OpenVINO output file
pytorch_model_path = brainmage_root / 'BrainMaGe/weights/resunet_ma.pt'
ov_model_dir = brainmage_root / 'BrainMaGe/weights/ov/fp32/'
device="cpu"
# ### Load Dataset csv
dataset_df = pd.read_csv(dataset_csv, header = None)
print(f"Number of rows: {dataset_df.shape[0]}")
print(f"Input Image Sample: {dataset_df.iloc[0][input_path_idx]}")
print(f"Mask Image Sample: {dataset_df.iloc[0][mask_path_idx]}")
def bench_pytorch_fp32():
### Load PyTorch model
pt_model = fetch_model(modelname="resunet", num_channels=1, num_classes=2, num_filters=16)
checkpoint = torch.load(pytorch_model_path, map_location=torch.device('cpu'))
pt_model.load_state_dict(checkpoint["model_state_dict"])
### Run PyTorch Inference
print (f"\n Starting PyTorch inference with {pytorch_model_path} ...")
_ = pt_model.eval()
pt_stats =[]
with torch.no_grad():
for i, row in tqdm(dataset_df.iterrows()):
sub_id = row[sub_idx]
input_path = row[input_path_idx]
mask_path = row[mask_path_idx]
try:
mask_image = get_mask_image(mask_path)
input_image, patient_nib = get_input_image(input_path)
i_start = timer()
pt_output = pt_model(input_image)
i_end = timer()
p_start = timer()
pt_output = pt_output.cpu().numpy()[0][0]
pt_to_save = postprocess_output(pt_output, patient_nib.shape)
pt_dice_score = dice(pt_to_save, mask_image)
p_end = timer()
pt_stat = [i, sub_id, pt_dice_score, i_end-i_start, p_end-p_start]
pt_stats.append(pt_stat)
except:
print (f" Inference Failed: {sub_id} ")
print (f"Done PyTorch inference with {pytorch_model_path} ...")
pt_stats_df = pd.DataFrame(pt_stats)
date_time_str = datetime.now().strftime("%b-%d-%Y_%H-%M-%S")
csv_name = f"pt_stats_{date_time_str}.csv"
pt_stats_df.to_csv(csv_name, sep=',', header=False, index=False)
print (f"Saved {csv_name} ...")
print (f"\n PyTorch Dice Mean: {pt_stats_df[:][2].mean():.5f}")
print (f"PyTorch Total Inf Time: {pt_stats_df[:][3].sum():.2f} sec, Mean: {pt_stats_df[:][3].mean():.2f} sec")
return pt_stats_df
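# Example driver (sketch, not part of the original script): the three bench_* functions in this
# file can be run back to back from a __main__ guard, e.g.
# if __name__ == "__main__":
#     bench_pytorch_fp32(); bench_ov_fp32(); bench_ov_int8()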
def bench_ov_fp32():
#### Load OpenVINO model
ov_model_dir = brainmage_root / 'BrainMaGe/weights/ov/fp32'
modelname = "resunet_ma"
model_xml = f'{ov_model_dir}/{modelname}.xml'
model_bin = f'{ov_model_dir}/{modelname}.bin'
# Load network to the plugin
ie = IECore()
net = ie.read_network(model=model_xml, weights=model_bin)
exec_net = ie.load_network(network=net, device_name="CPU")
del net
input_layer = next(iter(exec_net.input_info))
output_layer = next(iter(exec_net.outputs))
# # #### Run OpenVINO Inference
print (f"Starting OpenVINO FP32 inference with {ov_model_dir} ...")
ov_stats =[]
for i, row in tqdm(dataset_df.iterrows()):
sub_id = row[sub_idx]
input_path = row[input_path_idx]
mask_path = row[mask_path_idx]
try:
mask_image = get_mask_image(mask_path)
input_image, patient_nib = get_input_image(input_path)
i_start = timer()
ov_output = exec_net.infer(inputs={input_layer: input_image})
i_end = timer()
p_start = timer()
ov_output = ov_output[output_layer][0][0]
ov_to_save = postprocess_output(ov_output, patient_nib.shape)
ov_dice_score = dice(ov_to_save, mask_image)
p_end = timer()
ov_stat = [i, sub_id, ov_dice_score, i_end-i_start, p_end-p_start]
ov_stats.append(ov_stat)
except:
print (f" Inference Failed: {sub_id} ")
print (f"Done OpenVINO inference with {ov_model_dir} ...")
ov_stats_df = pd.DataFrame(ov_stats)
date_time_str = datetime.now().strftime("%b-%d-%Y_%H-%M-%S")
csv_name = f"ov_fp32_stats_{date_time_str}.csv"
ov_stats_df.to_csv(csv_name, sep=',', header=False, index=False)
print (f"Saved {csv_name} ...")
print (f"\n OpenVINO FP32 Dice Mean: {ov_stats_df[:][2].mean():.5f}")
print (f"OpenVINO FP32 Total Inf Time: {ov_stats_df[:][3].sum():.2f} sec, Mean: {ov_stats_df[:][3].mean():.2f}")
return ov_stats_df
def bench_ov_int8():
# #### Load INT8 OpenVINO model
ov_model_dir = brainmage_root / 'openvino/int8_openvino_model'
modelname = "resunet_ma_int8"
model_xml = f'{ov_model_dir}/{modelname}.xml'
model_bin = f'{ov_model_dir}/{modelname}.bin'
# Load network to the plugin
ie = IECore()
net = ie.read_network(model=model_xml, weights=model_bin)
exec_net = ie.load_network(network=net, device_name="CPU")
del net
input_layer = next(iter(exec_net.input_info))
output_layer = next(iter(exec_net.outputs))
# #### Run OpenVINO Inference
print (f"Starting OpenVINO inference with {ov_model_dir} ...")
ov_int8_stats =[]
for i, row in tqdm(dataset_df.iterrows()):
sub_id = row[sub_idx]
input_path = row[input_path_idx]
mask_path = row[mask_path_idx]
try:
mask_image = get_mask_image(mask_path)
input_image, patient_nib = get_input_image(input_path)
i_start = timer()
ov_output = exec_net.infer(inputs={input_layer: input_image})
i_end = timer()
p_start = timer()
ov_output = ov_output[output_layer][0][0]
ov_to_save = postprocess_output(ov_output, patient_nib.shape)
ov_dice_score = dice(ov_to_save, mask_image)
p_end = timer()
ov_int8_stat = [i, sub_id, ov_dice_score, i_end-i_start, p_end-p_start]
ov_int8_stats.append(ov_int8_stat)
except:
print (f" Inference Failed: {sub_id} ")
print (f"Done OpenVINO inference with {ov_model_dir} ...")
ov_int8_stats_df = pd.DataFrame(ov_int8_stats)
def location_at_distance(start_lon, start_lat, direction, distance=5):
'''
The purpose of this is to find a latitude and longitude at a specific distance from
another point. The inputs are the starting latitude and longitude, and the distance
and angle to proceed from that point.
http://www.edwilliams.org/avform147.htm#LL
Inputs:
- start_lat: metric degrees
- start_lon: metric degrees
- distance: metres
- direction: metric degrees
'''
from math import cos, sin, asin, pi
radians = (pi/180) # multiply by degrees to get radians
nm = 1/(60*1852) # multiply by metres to get arc degrees (1 nautical mile = 1852 m, 60 nm per degree)
start_lat = start_lat * radians
start_lon = start_lon * radians
distance = distance * nm * radians
direction = direction * radians
end_lat=asin(sin(start_lat)*cos(distance)+cos(start_lat)*sin(distance)*cos(direction))
if (cos(end_lat)==0):
end_lon=start_lon # endpoint a pole
else:
end_lon=((start_lon-asin(sin(direction)*sin(distance)/cos(end_lat))+pi)%(2*pi))-pi
return end_lat/radians, end_lon/radians
def test():
import pandas as pd
# Different distances in three bearings from a point near Cape Town.
sites = [
[18.565784, -33.603048, 0, 5],
[18.565784, -33.603048, 120, 50],
[18.565784, -33.603048, 240, 500],
]
df = pd.DataFrame(sites, columns=['long', 'lat', 'azimuth', 'distance'])
applied_df = \
df.apply(lambda x: location_at_distance(x.long, x.lat, x.azimuth), axis='columns', result_type='expand') \
.rename(columns={0: 'new_lat', 1: 'new_long'})
df = pd.concat([df, applied_df], axis='columns')
import numpy as np
import pandas as pd
cjxx1 = pd.read_csv('../SourceData/bks_cjxx_out1-1.csv',usecols = ['xh','xn','xqm','ksrq','kch','kxh','kccj','xf','kcsxdm','xdfsdm'])
cjxx2 = pd.read_csv('../SourceData/bks_cjxx_out1-2.csv',usecols = ['xh','xn','xqm','ksrq','kch','kxh','kccj','xf','kcsxdm','xdfsdm'])
cjxx = cjxx1.append(cjxx2).reset_index(drop=True)
cjxx = cjxx.astype({'xn':'str'})
dict_fail={}
dict_pass = {}
dict_score = {}
dict_res={}
print("00000")
counter=0
'''for index in cjxx.index:
if(cjxx.at[index,'xh'] == 201437059 and (cjxx.at[index,'kcsxdm'] == 1 or cjxx.at[index, 'kcsxdm'] == 3 or cjxx.at[index, 'kcsxdm'] == 0) and cjxx.at[index,'kccj']>=60):
print(cjxx.at[index,'xn'],cjxx.at[index,'xqm'],cjxx.at[index,'kch'],cjxx.at[index,'xf'])'''
for index in cjxx.index:
counter+=1
if(counter%10000==0):
print(counter)
if(cjxx.at[index,'xh'] <201400000 or cjxx.at[index,'xh']> 201900000):
continue
if(cjxx.at[index,'xh'] not in dict_score.keys()):
dict_score[cjxx.at[index, 'xh']] = [0] *100
dict_fail[cjxx.at[index,'xh']] = [0] * 100
dict_pass[cjxx.at[index, 'xh']] = [0] * 20
if(cjxx.at[index,'kcsxdm'] == 1):
start, end = cjxx.at[index, 'xn'].split('-')
if(int(start)<2014 or int(start) >=2019):
continue
dict_score[cjxx.at[index, 'xh']][2 * (3 * int(start) + cjxx.at[index, 'xqm'] - 6043) + 1] += cjxx.at[index, 'xf']
dict_score[cjxx.at[index, 'xh']][2 * (3 * int(start) + cjxx.at[index, 'xqm'] - 6043)] += cjxx.at[index, 'xf']*cjxx.at[index,'kccj']
if (cjxx.at[index, 'kccj'] >= 60):
dict_pass[cjxx.at[index, 'xh']][3 * int(start) + cjxx.at[index, 'xqm'] - 6043] += cjxx.at[index, 'xf']
else:
dict_fail[cjxx.at[index, 'xh']][3 * int(start) + cjxx.at[index, 'xqm'] - 6043] += 1
if (cjxx.at[index, 'kcsxdm'] == 3 or cjxx.at[index, 'kcsxdm'] == 0):
start, end = cjxx.at[index, 'xn'].split('-')
if (int(start) < 2014 or int(start) >= 2019):
continue
if (cjxx.at[index, 'kccj'] >= 60):
dict_pass[cjxx.at[index, 'xh']][3 * int(start) + cjxx.at[index, 'xqm'] - 6043] += cjxx.at[index, 'xf']
print(1111111)
counter=0
for key in dict_score.keys():
counter += 1
if (counter % 1000 == 0):
print(counter)
dict_res[key] = [0]*13
for i in range(13):
if(dict_score[key][2*i+1] != 0):
dict_res[key][i] = dict_score[key][2*i]/dict_score[key][2*i+1]
else:
dict_res[key][i] = 0
print(222222)
counter=0
sum1=0
sum2=0
sum3=0
result1 = pd.DataFrame(columns = ['xh','xueyejinggao1','xueyejinggao2'])
for key in dict_pass.keys():
counter += 1
if (counter % 1000 == 0):
print(counter)
temp = int(int(key)/100000)%10
if(temp == 4):
if(dict_pass[key][0] >= 15 and (dict_pass[key][1] + dict_pass[key][2]) >= 15 and dict_pass[key][3] >= 15 and (dict_pass[key][4] + dict_pass[key][5]) >= 15
and dict_pass[key][6] >= 15 and (dict_pass[key][7] + dict_pass[key][8]) >= 15):
result1 = result1.append(pd.DataFrame({'xh':[key],'xueyejinggao1':1}),ignore_index=True)
else:
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao1': 2}),ignore_index=True)
if (temp == 5):
if (dict_pass[key][3] >= 15 and (dict_pass[key][4] + dict_pass[key][5]) >= 15 and dict_pass[key][6] >= 15 and (dict_pass[key][7] + dict_pass[key][8]) >= 15 and dict_pass[key][9] >= 15 and (dict_pass[key][10] + dict_pass[key][11]) >= 15):
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao1': 1}),ignore_index=True)
else:
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao1': 2}),ignore_index=True)
if (temp == 6):
if (dict_pass[key][6] >= 15 and (dict_pass[key][7] + dict_pass[key][8]) >= 15 and dict_pass[key][9] >= 15 and (dict_pass[key][10] + dict_pass[key][11]) >= 15 and dict_pass[key][12] >= 15):
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao1': 1}),ignore_index=True)
else:
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao1': 2}),ignore_index=True)
if(temp == 7):
if (dict_pass[key][9] >= 15 and (dict_pass[key][10] + dict_pass[key][11]) >= 15 and dict_pass[key][12] >= 15):
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao2': 1}), ignore_index=True)
else:
result1 = result1.append(pd.DataFrame({'xh': [key], 'xueyejinggao2': 2}), ignore_index=True)
if(temp == 8):
if(dict_pass[key][12] >= 15):
sum1+=1
else:
sum3+=1
sum2+=1
result1 = result1.append(pd.DataFrame({'xh': [key]}), ignore_index=True)
#df = pd.DataFrame(dict_pass)
#df.to_csv('暂时.csv')
print(sum1,sum2,sum3)
result1.to_csv('../Data/xueyejinggao.csv',index=None,encoding='utf-8')
result = pd.DataFrame(columns = ['xh','2014-1','2014-2','2014-3','2015-1','2015-2','2015-3','2016-1','2016-2','2016-3','2017-1','2017-2','2017-3','2018-1'])
fail = pd.DataFrame(columns = ['xh','2014-1','2014-2','2014-3','2015-1','2015-2','2015-3','2016-1','2016-2','2016-3','2017-1','2017-2','2017-3','2018-1'])
from numpy.core.numeric import outer
import pandas as pd
import numpy as np
import functools
def outer_fn(keywords):
def filtre(data) -> bool:
for key in keywords:
if key in data.lower():
return True
return False
return filtre
def result(keywords, file_name: str, dataframe, field):
l = list(filter(outer_fn(keywords), dataframe[field].values))
new_sheet = pd.DataFrame(columns=list(dataframe.columns))
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **DSA 2021 TD by <NAME> - report by <NAME>**
# ------------------------- -------------------------------------
# # Descriptive analysis
# ## Setup
# In[5]:
get_ipython().system('pip install textblob')
# In[6]:
get_ipython().system('pip install emot')
# In[7]:
get_ipython().system('pip install wordcloud')
# In[8]:
# Time and files
import os
import warnings
import time
from datetime import timedelta
# Data manipulation
import pandas as pd
import numpy as np
# Text
from collections import Counter
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.util import ngrams
from textblob import TextBlob
import string
import re
import spacy
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
# Visualization
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from wordcloud import WordCloud
# Experiment tracking
import mlflow
import mlflow.sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# ### Using the packaged project
# In[9]:
# This cell imports the packaged version of the project and makes sure it is reloaded before its functions are called
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[10]:
from dsa_sentiment.scripts.make_dataset import load_data
from dsa_sentiment.scripts.evaluate import eval_metrics
from dsa_sentiment.scripts.make_dataset import Preprocess_StrLower, Preprocess_transform_target
# ### Configuring the MLflow experiment
# In[11]:
mlflow.tracking.get_tracking_uri()
# ### Loading the data
# In[12]:
# Import the data
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
X_val=pd.read_parquet('/mnt/data/interim/X_val.gzip')
X_test=pd.read_parquet('/mnt/data/interim/X_test.gzip')
#y
y_train=pd.read_parquet('/mnt/data/interim/y_train.gzip')
y_val=pd.read_parquet('/mnt/data/interim/y_val.gzip')
y_test=pd.read_parquet('/mnt/data/interim/y_test.gzip')
# ## EDA
# We start by analysing the balance of the different sentiment classes
# In[13]:
df = df_train
df.head()
# ### Class balance of the training set by label
# In[14]:
fig = px.histogram(df, x="sentiment", color="sentiment", title = 'Nombre de tweets par sentiment')
fig.show()
# There is a slight class imbalance in favour of the `neutral` sentiment
# ### Lexical field analysis by label
# For the rest of the work, we build one corpus per tone by concatenating all the tweets of that tone.
# In[15]:
def create_corpus(text_series):
text = text_series.apply(lambda x : x.split())
text = sum(text, [])
return text
# In[16]:
positive_text = create_corpus(df['text'][df['sentiment']=='positive'])
negative_text = create_corpus(df['text'][df['sentiment']=='negative'])
neutral_text = create_corpus(df['text'][df['sentiment']=='neutral'])
# It then becomes possible to build histograms showing the frequency of N-grams in a given corpus
# In[17]:
def plot_freq_dist(text_corpus, nb=30, ngram=1, title=''):
'''
Plot the most common words
inputs:
text_corpus : a corpus of words
nb : number of words to plot
title : graph title
returns:
nothing, plots the graph
'''
freq_pos=Counter(ngrams(create_corpus(pd.Series(text_corpus)),ngram))
pos_df = pd.DataFrame({
"words":[' '.join(items) for items in list(freq_pos.keys())],
"Count":list(freq_pos.values())
})
common_pos= pos_df.nlargest(columns="Count", n=30)
fig = px.bar(common_pos, x="words", y="Count", labels={"words": "Words", "Count":"Frequency"}, title=title)
fig.show();
# In[18]:
plot_freq_dist(positive_text, title = 'Most common words associated with positive tweets')
# The result shows the dominance of `stopwords`, the very common function words that get in the way of identifying the keywords specific to a document or set of documents.
#
# The text therefore needs to be reprocessed before analysis.
# ### Preprocessing
# Among the tweet-specific elements that can affect the rest of the analysis are:
#
# - keywords marked with a `#`
# - user names starting with an `@`
# - emoticons and emojis
# - words in CAPITAL letters
# - repeated characters used for emphasis (`!!!!`, `looooong`) or self-censorship (`f***`)
# - typos (words of fewer than 2 characters)
# To keep the processing homogeneous, reproducible and configurable, a dedicated function is created; its parameters can then be tuned during the later modelling phases.
# source [preprocess](https://www.kaggle.com/stoicstatic/twitter-sentiment-analysis-for-beginners)
# In[57]:
def preprocess_text(text_series,
apply_lemmatizer=True,
apply_lowercase=True,
apply_url_standerdisation=True,
apply_user_standerdisation=True,
apply_emoticon_to_words=True,
apply_stopwords_removal=True,
apply_shortwords_removal=True,
apply_non_alphabetical_removal=True,
apply_only_2_consecutive_charac=True
):
'''
Main preprocess function
inputs:
text_series : a pandas Series object with text to preprocess
outputs:
a preprocessed pandas Series object
'''
processedText = []
if apply_lemmatizer:
# Create Lemmatizer and Stemmer.
wordLemm = WordNetLemmatizer()
# Defining regex patterns.
urlPattern = r"((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)"
userPattern = '@[^\s]+'
alphaPattern = r"[^(\w|\*|(!){2}|#)]"
sequencePattern = r"(.)\1\1+"
seqReplacePattern = r"\1\1"
for tweet in text_series:
if apply_lowercase:
tweet = tweet.lower()
if apply_url_standerdisation:
# Replace all URls with 'URL'
tweet = re.sub(urlPattern,' URL',tweet)
if apply_user_standerdisation:
# Replace @USERNAME to 'USER'.
tweet = re.sub(userPattern,' USER', tweet)
if apply_emoticon_to_words:
# Replace all emojis.
for emo in EMOTICONS:
#refactor outputs so that we come up with a single word when/if text spliting afterwards
val = "_".join(EMOTICONS[emo].replace(",","").split())
val='EMO_'+val
tweet = tweet.replace(emo, ' '+val+' ')
for emot in UNICODE_EMO:
val = "_".join(UNICODE_EMO[emot].replace(",","").replace(":","").split())
val='EMO_'+val
tweet = tweet.replace(emo, ' '+val+' ')
if apply_only_2_consecutive_charac:
# Replace 3 or more consecutive letters by 2 letter.
tweet = re.sub(sequencePattern, seqReplacePattern, tweet)
if apply_non_alphabetical_removal:
# Replace all non alphabets.
tweet = re.sub(alphaPattern, " ", tweet)
tweetwords = ''
for word in tweet.split():
# Checking if the word is a stopword.
if apply_stopwords_removal:
if word in stopwords.words('english'):
word=''
else:
word=word
#if word not in stopwordlist:
if apply_shortwords_removal:
if len(word)<=1:
word=''
else:
word=word
# Lemmatizing the word.
if apply_lemmatizer:
word = wordLemm.lemmatize(word)
else:
word=word
tweetwords += (word+' ')
processedText.append(tweetwords)
return processedText
# In[20]:
positive_text_prepro = preprocess_text(df['text'][df['sentiment']=='positive'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[56]:
pd.Series(positive_text_prepro).head()
# In[21]:
neutral_text_prepro = preprocess_text(df['text'][df['sentiment']=='neutral'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[58]:
pd.Series(neutral_text_prepro).head()
# In[22]:
negative_text_prepro = preprocess_text(df['text'][df['sentiment']=='negative'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[59]:
pd.Series(negative_text_prepro).head()
# ### Keywords of positive tweets
# The following function builds word clouds from a corpus
# In[23]:
def plotWc(text, stopwords=None, title=''):
wc = WordCloud(
stopwords=stopwords,
width=800,
height=400,
max_words=1000,
random_state=44,
background_color="white",
collocations=False
).generate(text)
plt.figure(figsize = (10,10))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.title(title)
plt.show()
# In[24]:
plotWc(" ".join(positive_text_prepro), stopwords=stopwords.words('english'), title = "Wordcloud des tweets positifs")
# Positive tweets are marked by a strong representation of words with a positive connotation: `love`, `good`, `happy`.
#
# This visual impression can be confirmed with a frequency plot of the most common individual words
# In[26]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), title = 'Most common words associated with positive tweets')
# In[27]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), ngram=2, title = 'Most common 2grams associated with positive tweets')
# In[28]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), ngram=3, title = 'Most common 3grams associated with positive tweets')
# In[29]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), ngram=4, title = 'Most common 4grams associated with positive tweets')
# :::{admonition}[**insight**] :
#
# A large majority of positive tweets relate either to Mother's Day or to May the 4th, because of the Star Wars pun...
#
# <div>
# <img src=https://upload.wikimedia.org/wikipedia/fr/c/ca/LogoSW4th.png width="400"/>
# </div>
#
#
# This peculiarity will most likely be picked up by the models as a probable marker of positive tweets.
# :::
# ### Keywords of neutral tweets
# In[30]:
plotWc(" ".join(pd.Series(neutral_text_prepro)), stopwords=stopwords.words('english'), title = "Wordcloud des tweets neutres")
# In[31]:
plot_freq_dist(create_corpus(pd.Series(neutral_text_prepro)), title = 'Most common words associated with neutral tweets')
# :::{admonition}[**Insight**] :
#
# Note that the word `day`, the most frequent keyword of positive tweets, also appears in 6th position among the neutral words.
#
# :::
# In[32]:
plot_freq_dist(create_corpus(pd.Series(neutral_text_prepro)), ngram=2, title = 'Most common 2grams associated with neutral tweets')
# In[33]:
plot_freq_dist(create_corpus(pd.Series(neutral_text_prepro)), ngram=3, title = 'Most common 3grams associated with neutral tweets')
# In[34]:
plot_freq_dist(create_corpus(pd.Series(neutral_text_prepro)))
# object that contains the simulation data.
class MonteCarlo:
'''
(OBJECT INFO)
-------------
vandal.MonteCarlo - main class.
(OBJECT FUNCTIONS)
------------------
eg. vandal.MonteCarlo.function()
.execute() - executes a Monte Carlo simulation on a defined data set.
* takes 4 additional arguments.
list_of_values - pandas dataframe of values.
time_seq - desired time sequence.
num_sims - desired number of simulation iterations.
ref_value_index (default: ref_value_index = 0) - index on which the starting point of the simulation is created.
* Requirements:
pandas Python module.
pd.DataFrame() defined data set.
.graph() - plots the Monte Carlo simulation on a graph.
* takes 5 optional customization arguments. (default: graph_title = 'Monte Carlo simulation', x_title = 'X axis', y_title = 'Y axis', plot_size = (25,10), perform_block = True).
graph_title - title of the graph.
x_title - title of the X axis.
y_title - title on the Y axis.
plot_size - desired size of the graph. eg. - (x_lenght_num, y_lenght_num). - NOTE: values must be inside the parentheses and divided by a comma.
perform_block (default: perform_block = True) - False/True may be used depending on the IDE requirements.
.get_risk() - calculates the risk of value decrease over time.
* takes 1 optional argument (default: risk_sims = 5000).
.get_stats() - shows the statistics of the Monte Carlo simulation.
* takes no additional arguments.
.get_change() - shows the percentage of Monte Carlo simulation value change for every iteration.
* takes no additional arguments.
.hist() - plots the histogram of Monte Carlo simulation.
* takes 6 optional customization arguments. (default: graph_title = 'Monte Carlo simulation', x_title = 'X axis', y_title = 'Y axis', plot_size = (25,10), perform_block = True, method = 'b').
If method = 'e' is chosen, no customization arguments apply.
graph_title - title of the graph.
x_title - title of the X axis.
y_title - title on the Y axis.
plot_size - desired size of the graph. eg. - (x_lenght_num, y_lenght_num). - NOTE: values must be inside the parentheses and divided by a comma.
perform_block (default: perform_block = True) - False/True may be used depending on the IDE requirements.
method - default method is Basic histogram and it's performed by automation. In order to plot Empirical rule histogram add method = 'e' as the last argument. - NOTE: method of a histogram must be placed within quotation marks.
* automatically executes the .get_stats(filtered = True) function in order to get standard deviation for the Empirical rule plotting.
(DEVELOPER MODE)
----------------
Developer mode functions can only be set up manually by removing the '#DEVELOPER MODE -' in the source code.
* takes no additional arguments.
* Requirements:
'# DEVELOPER MODE -' removed in the code.
'''
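# Example usage (sketch, not from the original documentation; `values` is assumed to be a
# pandas Series/column of historical observations):
#   mc = MonteCarlo()
#   sims = mc.execute(list_of_values=values, time_seq=30, num_sims=100)
#   mc.graph(); stats = mc.get_stats(); risk = mc.get_risk()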
# metadata of the used library.
from vandal.misc._meta import (
__author__,
__copyright__,
__credits__,
__license__,
__version__,
__documentation__,
__contact__,
__donate__,
)
# import duality package decorators.
# DEVELOPER MODE - from duality.decorators.classparticles import track, record
# initial launch.
# DEVELOPER MODE - @track.entry('init')
# DEVELOPER MODE - @record.entry(option_name = 'init', option_description = 'initial launch.')
def __init__(self):
pass
# class information.
# DEVELOPER MODE - @track.entry('string')
# DEVELOPER MODE - @record.entry(option_name = 'string', option_description = 'class information.')
def __str__(self):
return f'Monte Carlo defining object that stores the configuration data for creating {self.num_sims} simulations in a period of {self.time_seq} time measurement units.'
# class information.
# DEVELOPER MODE - @track.entry('representation')
# DEVELOPER MODE - @record.entry(option_name = 'repr', option_description = 'shows object details.')
def __repr__(self):
return f'Monte Carlo defining object that stores the configuration data for creating {self.num_sims} simulations in a period of {self.time_seq} time measurement units.'
# executes a Monte Carlo simulation on a defined data set.
# DEVELOPER MODE - @track.entry('execution')
# DEVELOPER MODE - @record.entry(option_name = 'execution', option_description = 'executes a Monte Carlo simulation on a defined data set.')
def execute(self, list_of_values, time_seq, num_sims, ref_value_index = 0):
self.list_of_values = list_of_values
self.time_seq = time_seq
self.num_sims = num_sims
self.ref_value_index = ref_value_index
print(f'Monte Carlo has been set up for {self.num_sims} simulations in a period of {self.time_seq} time measurement units.')
from vandal.hub.toolkit import random_value
print('Monte Carlo simulation has been executed.')
print('NOTE: Use data with reasonable standard deviation in order to prevent exponential growth of the function that cannot be plotted properly, recognize such abnormal values by a + sign anywhere in the data executed below.\nThe model that will be able to handle big standard deviations is currently being worked on, thank you for your patience.\n')
import pandas as pd
# this removes pandas warning of highly fragmented DataFrame for newer pandas versions.
from warnings import simplefilter
simplefilter(action = 'ignore', category = pd.errors.PerformanceWarning)
# end of pandas warning removal block.
today_value = self.list_of_values.iloc[self.ref_value_index]
data = pd.DataFrame()
loading = 0
for num_sim in range(self.num_sims):
rand_change = random_value(self.list_of_values.pct_change().mean(), self.list_of_values.pct_change().std())
count = 0
index_array = []
index_array += [today_value * (1 + rand_change)]
if index_array[count] > (index_array[-1] * 2):
raise Exception('Variation between data is too big, due to detection of exponentional increase of values or non-sequential data Monte Carlo simulation cannot be executed properly.')
for num_day in range(self.time_seq):
rand_change = random_value(self.list_of_values.pct_change().mean(), self.list_of_values.pct_change().std())
if count == self.time_seq:
break
index_array += [index_array[count] * (1 + rand_change)]
count += 1
if index_array[count] > (index_array[-1] * 2):
raise Exception('Variation between data is too big, due to detection of exponentional increase of values or non-sequential data Monte Carlo simulation function cannot be executed properly.')
loading += 1
print(end = '\r')
print(loading, 'iterations out of', self.num_sims, 'executed so far', end = '')
data[num_sim] = index_array
print(end = '\r')
print('Monte Carlo simulation set up and ready to plot.')
self.results = data
return data
# shows the percentage of Monte Carlo simulation value change for every iteration.
# DEVELOPER MODE - @track.entry('change')
# DEVELOPER MODE - @record.entry(option_name = 'change', option_description = 'shows the percentage of Monte Carlo simulation value change for every iteration.')
def get_change(self):
return self.results.pct_change()
# calculates the risk of negative values occuring.
# DEVELOPER MODE - @track.entry('risk')
# DEVELOPER MODE - @record.entry(option_name = 'risk', option_description = 'calculates the risk of negative values occuring.')
def get_risk(self, risk_sims = 5000):
import random
import pandas as pd
#This removes pandas warning of highly fragmented DataFrame for newer pandas versions.
from warnings import simplefilter
simplefilter(action = 'ignore', category = pd.errors.PerformanceWarning)
#End of pandas warning removal block.
today_value = self.list_of_values.iloc[self.ref_value_index]
percent_change = self.list_of_values.pct_change()
data = pd.DataFrame()
import pandas as pd
import os
from collections import namedtuple
from strategy.strategy import Exposures, Portfolio
from strategy.rebalance import get_relative_to_expiry_instrument_weights, \
get_relative_to_expiry_rebalance_dates, get_fixed_frequency_rebalance_dates
from strategy.calendar import get_mtm_dates
def make_container(holdings, trades, pnl):
container = namedtuple("sim_result", ["holdings", "trades", "pnl"])
return container(holdings, trades, pnl)
def make_exposures(root_generics, meta_fp, market_fp):
return Exposures.from_folder(meta_fp, market_fp, root_generics)
def make_portfolio(exposures, sd, ed, capital, offset, all_monthly=False,
holidays=None):
rebal_dts = get_relative_to_expiry_rebalance_dates(
sd, ed, exposures.expiries, offset, all_monthly=all_monthly
)
exchanges = exposures.meta_data.loc["exchange", :].unique()
mtm_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
root_generics = exposures.future_root_and_generics
wts = get_relative_to_expiry_instrument_weights(
mtm_dates, root_generics, exposures.expiries, offset,
all_monthly=all_monthly
)
portfolio = Portfolio(
exposures, rebal_dts, mtm_dates, wts, initial_capital=capital
)
return portfolio
def make_frequency_portfolio(frequency, offset, exposures, sd, ed, capital,
holidays=None):
rebal_dts = get_fixed_frequency_rebalance_dates(
sd, ed, frequency, offset
)
wts = {}
exchanges = exposures.meta_data.loc["exchange", :].unique()
mtm_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
portfolio = Portfolio(
exposures, rebal_dts, mtm_dates, wts, initial_capital=capital
)
return portfolio
def make_signal(portfolio):
asts = portfolio.future_generics + portfolio.equities
dates = portfolio.rebalance_dates
signal = pd.DataFrame(1, index=dates, columns=asts)
return signal
def get_notionals(risk_target, capital, signals, prices, multipliers,
discrete):
if discrete:
def calc(sig, price, mult):
return round(sig * risk_target * capital / (price * mult)) * price * mult # NOQA
else:
def calc(sig, price, mult):
return sig * risk_target * capital * price * mult
notionals = []
for s_i, p_i, m_i in zip(signals, prices, multipliers):
notionals.append(calc(s_i, p_i, m_i))
return notionals
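# Worked example (sketch, not from the original module): with risk_target=0.1, capital=1_000_000,
# one signal of 1.0, price 100 and multiplier 50, the discrete branch rounds to whole contracts:
# get_notionals(0.1, 1_000_000, [1.0], [100], [50], discrete=True) -> [100000.0] (20 contracts * 100 * 50)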
def read_futures_instr(data_path, instr):
fn = os.path.join(data_path, instr[:2], instr + ".csv")
data = pd.read_csv(fn, parse_dates=True, index_col=0)
data = data.Settle
data.sort_index(inplace=True)
return data
def splice_futures_and_pnl(data_path, instr_sd_ed):
# instr_sd_ed is a list of tuples,
# e.g. [("ESH2015", sd, ed1), ("ESM2015", ed2)], only sd is given for
# first contract, assummed consecutive afterwards
MULTS = {"ES": 50, "TY": 1000}
prices = []
pnls = []
instr, sd, ed = instr_sd_ed[0]
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
price = read_futures_instr(data_path, instr)
price = price.loc[sd:ed]
# drop NaN at start
pnls.append(price.diff().iloc[1:])
# since holdings on rebalance day are post rebalance holdings
prices.append(price.iloc[:-1])
sd = ed
for i, instr_ed in enumerate(instr_sd_ed[1:]):
instr, ed = instr_ed
ed = pd.Timestamp(ed)
price = read_futures_instr(data_path, instr)
price = price.loc[sd:ed]
# drop NaN at start
pnls.append(price.diff().iloc[1:])
# check for last element
if i < (len(instr_sd_ed[1:]) - 1):
prices.append(price.iloc[:-1])
else:
prices.append(price)
sd = ed
prices = pd.concat(prices, axis=0) * MULTS[instr[:2]]
pnls = pd.concat(pnls, axis=0) * MULTS[instr[:2]]
return prices, pnls
def splice_returns(data_path, instr_sd_ed):
# instr_sd_ed is a list of tuples,
# e.g. [("ESH2015", sd1, ed1), ("ESM2015", sd2, ed2)]
rets = []
for instr, sd, ed in instr_sd_ed:
sd = pd.Timestamp(sd)
ed = pd.Timestamp(ed)
#! /usr/bin/python3
import numpy as np
import random as rnd
import functions
import pandas as pd
class NAgent:
nnetfileName = "./data/garry_007.nn"
user_id = None
case_id = None
url = None
nnet = None
rmsprop_cache = None
grad_buffer = None
prev_act = None
prev_score = None
prev_hash = None
prev_weights_dict = None
decay_rate = 0.95 # decay factor for RMSProp leaky sum of grad^2
alpha = 0.01 # learning rate
gamma = 0.51 # discount coef
delta = 0.0001 # LR descent coef
batch_size = 10
help_degree = 0.2 # part of help of correct weights
dropout = 0.5 # part of neurons in hidden layers to dropout
xs, hs, h2s, errs, zs, rs = [], [], [], [], [], []
a_xs, a_hs, a_h2s, a_zerrs = None, None, None, None
episode = pd.DataFrame(columns= ['hash_s', 'act', 'reward', 'hash_news'])
gamesQ = 0 # game counter - needed to implement batching during training
all_acts_dict = {
"none": ["noAct", "noAct"], "take": ["noAct", "Take"],
"go_forward": ["noAct", "Go"], "go_right": ["onRight", "Go"],
"go_back": ["upSideDn", "Go"], "go_left": ["onLeft", "Go"],
"shoot_forward": ["noAct", "Shoot"], "shoot_right": ["onRight", "Shoot"],
"shoot_back": ["upSideDn", "Shoot"], "shoot_left": ["onLeft", "Shoot"]
}
all_acts_nums_dict = {
"none": 0, "take": 1, "go_forward": 2, "go_right": 3, "go_back": 4,
"go_left": 5, "shoot_forward": 6, "shoot_right": 7, "shoot_back": 8,
"shoot_left": 9
}
all_acts_list = [ "none", "take", "go_forward", "go_right", "go_back", "go_left",
"shoot_forward", "shoot_right", "shoot_back", "shoot_left" ]
colnames = ["hash", "take", "go_forward", "go_right", "go_back", "go_left", "shoot_forward", "shoot_right",
"shoot_back", "shoot_left"]
fin_codes = ['--- agent is dead ---', '---- time is over ---', '!!! agent is WINNER !!!']
# PUBLIC METHODS ==============================================================
def playGame(self, map_num, alpha, gamma, batch_size=10, tid=0, hashid=0):
self.alpha = alpha
self.gamma = gamma
self.batch_size = batch_size
self.help_degree = 0.5
self.dropout = 0.5
request_code = None # completion code of the move
curr_score = None # score earned so far
# request the state of the starting cave by performing an empty move
acts = self.all_acts_dict["none"]
request = functions.connectToServer(self.user_id, self.case_id, map_num, acts, tid, hashid)
if request != None: # connection to the server established, we can play
# parse the server response
request_error = request["error"]
percept = request["text"]
curr_score = percept['iagent']["score"]
# initialise the variables that record the previous state and move
curr_hash = self.__getHash__(percept)
self.prev_act = "none"
self.prev_score = curr_score
self.prev_hash = curr_hash
# create the move table used to record the game (episode)
rec = {'hash_s': [curr_hash], 'act': ["none"], 'reward': [0], 'hash_news': [curr_hash]}
self.episode = pd.DataFrame(columns= rec.keys())
# start the game
while request_error == None: # while there is no error (the game has not ended)
if request != None:
''' # choose a move for the current state; if the state is new, add it to the policy
(utility) database as +4 records; adjust the number of new fields in the database '''
curr_act = self.__chooseAct__(curr_hash)
acts = self.all_acts_dict[curr_act]
# remember the score earned before the chosen move, the hash of the current state s, and the chosen action
self.prev_score = curr_score
self.prev_hash = curr_hash
self.prev_act = curr_act
# request a response from the server: report the chosen move and receive the new state s'
request = functions.connectToServer(self.user_id, self.case_id, map_num, acts, tid, hashid)
if request != None:
# parse the server response
request_error = request["error"]
percept = request["text"]
curr_score = percept["iagent"]["score"]
request_code = int(percept["code"])
curr_hash = self.__getHash__(percept)
# ----- append the new move to the move table and the reward list -----
reward = curr_score - self.prev_score
self.rs.append(reward)
# ---------- this table is only needed for monitoring ----------------
rec = {'hash_s': [self.prev_hash], 'act': [curr_act], 'reward': [reward], 'hash_news': [curr_hash]}
step1 = pd.DataFrame(data= rec)
self.episode = pd.concat([self.episode, step1])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
id
import itertools
import datetime
from matplotlib.font_manager import FontProperties
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
'This version of the code derives the hourly and seasonal reflectance thresholds'
'for the selected pixels every 15 minutes, because it works with the 2018 GOES'
'data set, which is the most complete one and makes it possible to obtain the'
'thresholds seasonally. The old version of this code, which derived them every 10'
'minutes over the experiment horizon, is kept in the Backups_VersionesAtiguas_Codigos'
'folder in case it needs to be consulted again.'
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## -------------------------------HOURS OF THE DAY TO WORK WITH------------------------------- ##
HI = '06:00'; HF = '17:59'
#################################################################################################
## -----------------LOADING THE RADIATION AND EXPERIMENT DATA--------------------------------- ##
#################################################################################################
df_P975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018.txt', parse_dates=[2])
df_P350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018.txt', parse_dates=[2])
df_P348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018.txt', parse_dates=[2])
df_P975 = df_P975.set_index(["fecha_hora"])
df_P975.index = df_P975.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P975.index = df_P975.index.tz_localize(None)
df_P350 = df_P350.set_index(["fecha_hora"])
df_P350.index = df_P350.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P350.index = df_P350.index.tz_localize(None)
df_P348 = df_P348.set_index(["fecha_hora"])
df_P348.index = df_P348.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P348.index = df_P348.index.tz_localize(None)
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
'Since radiation is what matters here, only radiation values greater than 0'
'are kept when filtering the data.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) ]
df_P350 = df_P350[(df_P350['radiacion'] > 0) ]
df_P348 = df_P348[(df_P348['radiacion'] > 0) ]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
##----ADJUSTING THE MEASURED RADIATION DATA TO THE DESIRED DATE RANGE-----##
def daterange(start_date, end_date):
'Hourly date adjustment for the Kumar model. The start and end'
'dates are strings in the %Y-%m-%d format.'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
fi_m = min(fechas_975[0].month, fechas_350[0].month, fechas_348[0].month)
fi_d = min(fechas_975[0].day, fechas_350[0].day, fechas_348[0].day)
ff_m = min(fechas_975[-1].month, fechas_350[-1].month, fechas_348[-1].month)
ff_d = min(fechas_975[-1].day, fechas_350[-1].day, fechas_348[-1].day)
## -----------------------------AGGREGATING PYRANOMETER DATA EVERY 15 MINUTES ----------------------------- ##
df_P348_15m = df_P348.groupby(pd.Grouper(freq="15Min")).mean()
df_P350_15m = df_P350.groupby(pd.Grouper(freq="15Min")).mean()
df_P975_15m = df_P975.groupby(pd.Grouper(freq="15Min")).mean()
df_P348_15m = df_P348_15m.between_time(HI, HF)
df_P350_15m = df_P350_15m.between_time(HI, HF)
df_P975_15m = df_P975_15m.between_time(HI, HF)
df_P348_15m = df_P348_15m.loc[~df_P348_15m.index.duplicated(keep='first')]
df_P350_15m = df_P350_15m.loc[~df_P350_15m.index.duplicated(keep='first')]
df_P975_15m = df_P975_15m.loc[~df_P975_15m.index.duplicated(keep='first')]
####################################################################################
## ----------------READING THE GOES CH2 DATA ON THE GENERAL GRID---------------- ##
####################################################################################
Rad = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_Anio.npy')
df_fh = pd.DataFrame()
df_fh ['fecha_hora'] = fechas_horas
df_fh['fecha_hora'] = pd.to_datetime(df_fh['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
df_fh.index = df_fh['fecha_hora']
w = pd.date_range(df_fh.index.min(), df_fh.index.max()).difference(df_fh.index)
df_fh = df_fh[df_fh.index.hour != 5]
#################################################################################################
##-------------------READING THE GOES CH2 DATA FOR EACH PIXEL----------------------------------##
#################################################################################################
Rad_pixel_975 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix975_Anio.npy')
Rad_pixel_350 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix350_Anio.npy')
Rad_pixel_348 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix348_Anio.npy')
fechas_horas = df_fh['fecha_hora'].values
## -- Building the radiance dataframes
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
Rad_df_975 = Rad_df_975.between_time('06:00', '18:00') ##--> Keep only daytime data
Rad_df_975_h = Rad_df_975.groupby(pd.Grouper(freq="H")).mean()
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
Rad_df_350 = Rad_df_350.between_time('06:00', '18:00') ##--> Keep only daytime data
Rad_df_350_h = Rad_df_350.groupby(pd.Grouper(freq="H")).mean()
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
import json
import pandas as pd
from datetime import datetime
from src.func import tweet_utils
from src.func import regex
def load_tweets(geotweet_path):
with open(geotweet_path, 'r') as f:
tweets = json.load(f)
return remove_duplicates(tweets)
def remove_duplicates(tweets):
df = pd.DataFrame.from_records(tweets)
df.drop_duplicates(subset='id_str', inplace=True)
tweets = df.to_dict('records')
return tweets
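# Example usage (sketch, not from the original module; the path is hypothetical):
# tweets = load_tweets("data/geotweets.json")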
def bad_tweet_filter(tweet, hashtag_list):
if pd.isnull(tweet['pure_text']):
#!/usr/bin/env python
# coding: utf-8
# # Wasserstein Pareto Frontier Experiment on Adult Data Set
# ## Import Data
# The experiment uses the Adult experiment_data2 data set as in "Optimized Pre-Processing for Discrimination Prevention" by Calmon et al., for comparison purposes: https://github.com/fair-preprocessing/nips2017/tree/master/Adult/experiment_data2
# In[1]:
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.interpolate import interp1d
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score, auc, classification_report, roc_curve
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
from scipy.linalg import sqrtm
from scipy import stats
from matplotlib import gridspec
from matplotlib.patches import Rectangle
# import data
path =r'/Users/shizhouxu/Documents/LIBRARY/Python/Fair_L2_Supervised_Learning/experiment_data2/' # use your path
train_0 = pd.read_csv(path + "train_0.csv",index_col=None, header=0, usecols=range(1,6))
train_1 = pd.read_csv(path + "train_1.csv",index_col=None, header=0, usecols=range(1,6))
train_2 = pd.read_csv(path + "train_2.csv",index_col=None, header=0, usecols=range(1,6))
train_3 = pd.read_csv(path + "train_3.csv",index_col=None, header=0, usecols=range(1,6))
train_4 = pd.read_csv(path + "train_4.csv",index_col=None, header=0, usecols=range(1,6))
test_0 = pd.read_csv(path + "test_0.csv",index_col=None, header=0, usecols=range(1,6))
test_1 = pd.read_csv(path + "test_1.csv",index_col=None, header=0, usecols=range(1,6))
test_2 = pd.read_csv(path + "test_2.csv",index_col=None, header=0, usecols=range(1,6))
test_3 = pd.read_csv(path + "test_3.csv",index_col=None, header=0, usecols=range(1,6))
test_4 = pd.read_csv(path + "test_4.csv",index_col=None, header=0, usecols=range(1,6))
train_new_0 = pd.read_csv(path + "train_new_0.csv",index_col=None, header=0, usecols=range(1,6))
train_new_1 = pd.read_csv(path + "train_new_1.csv",index_col=None, header=0, usecols=range(1,6))
train_new_2 = pd.read_csv(path + "train_new_2.csv",index_col=None, header=0, usecols=range(1,6))
train_new_3 = pd.read_csv(path + "train_new_3.csv",index_col=None, header=0, usecols=range(1,6))
train_new_4 = pd.read_csv(path + "train_new_4.csv",index_col=None, header=0, usecols=range(1,6))
test_new_0 = pd.read_csv(path + "test_new_0.csv",index_col=None, header=0, usecols=range(1,6))
test_new_1 = pd.read_csv(path + "test_new_1.csv",index_col=None, header=0, usecols=range(1,6))
test_new_2 = pd.read_csv(path + "test_new_2.csv",index_col=None, header=0, usecols=range(1,6))
test_new_3 = pd.read_csv(path + "test_new_3.csv",index_col=None, header=0, usecols=range(1,6))
test_new_4 = pd.read_csv(path + "test_new_4.csv",index_col=None, header=0, usecols=range(1,6))
# all available data variables: features = ['Age (decade)','Education Years','Income','Gender','Race','Income Binary']
features = ['Age (decade)','Education Years','Income','Gender','Income Binary']
# sensitive random variable Z: Z_features = ['Gender']
Z_features = ['Gender']
# dependent random variable Y: Y_features = ['Income Binary']
Y_features = ['Income Binary']
# independent random variable X: X_features = ['Age (decade)', 'Education Years']
X_features = ['Age (decade)', 'Education Years']
# combine the data sets by train/test category:
TrainList=[train_0,train_1,train_2,train_3,train_4]
TestList=[test_0,test_1,test_2,test_3,test_4]
TrainNewList=[train_new_0,train_new_1,train_new_2,train_new_3,train_new_4]
TestNewList=[test_new_0,test_new_1,test_new_2,test_new_3,test_new_4]
# combined data set excluding the linearly dependent variable 'Income': df
ord_enc = OrdinalEncoder()
df = pd.concat([train_0,train_1,train_2,train_3,train_4,test_0,test_1,test_2,test_3,test_4])
df = df.drop('Income',axis = 1)
# data set further excluding the sensitive variable: df_delete
df_delete = df.drop('Gender',axis = 1)
# sensitive variable Z: gender
gender = np.array(df['Gender'])
# ## Compute the Wasserstein Pseudo-barycenter for X
# In[2]:
# independent variable: X
X = np.array(pd.get_dummies(df[X_features]))
# dependent variable: Y
Y = np.array(pd.get_dummies(df[Y_features]))
# mean of X and Y: X_mean, Y_mean
X_mean = np.mean(X,axis = 0)
Y_mean = np.mean(Y)
# covariance (matrix) of X and Y: X_cov, Y_cov
X_cov = np.cov(X.T)
Y_cov = np.cov(Y.T)
# marginal (conditional) dependent variables: X_male, Y_male
X_male = X[gender == ' Male',:]
Y_male = Y[gender == ' Male']
X_female = X[gender == ' Female',:]
Y_female = Y[gender == ' Female']
# marginal mean: X_(fe)male_mean, Y_(fe)male_mean
X_male_mean = np.average(X_male, axis = 0)
Y_male_mean = np.average(Y_male)
X_female_mean = np.average(X_female, axis = 0)
Y_female_mean = np.average(Y_female)
# marginal covariance: X_(fe)male_cov, Y_(fe)male_cov,
X_male_cov = np.cov(X_male.T)
Y_male_cov = np.cov(Y_male.T)
X_female_cov = np.cov(X_female.T)
Y_female_cov = np.cov(Y_female.T)
# cross-covariance (matrix) between Y and X: yX_(fe)male_cov
yX_male_cov = np.cov(Y_male.T, X_male.T)[range(1,17),0]
yX_female_cov = np.cov(Y_female.T, X_female.T)[range(1,17),0]
# algorithm 1, step 1: fixed-point iteration for the barycenter covariance matrix of the independent variable, with stopping criterion: change below 0.0001
sample_size = len(X[:,0])
X_bar = np.random.rand(16,16) # random initialization for the covariance
eps = 10 # initialization for the stop variable
while eps > 0.0001:
X_new = ((len(X_male[:,0])/sample_size) * sqrtm(sqrtm(X_bar)@X_male_cov@sqrtm(X_bar))) + ((len(X_female[:,0])/sample_size) * sqrtm(sqrtm(X_bar)@X_female_cov@sqrtm(X_bar)))
eps = np.linalg.norm(X_bar - X_new)
X_bar = X_new
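# Added note: the loop above iterates the standard fixed-point characterization of the
# Bures-Wasserstein barycenter covariance,
#     S = sum_z w_z * ( S^{1/2} @ Sigma_z @ S^{1/2} )^{1/2},
# with weights w_z equal to the male/female sample proportions and Sigma_z the group
# covariances; iteration stops once successive iterates differ by less than 1e-4
# (Frobenius norm).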
# algorithm 1, step 2: the corresponding Brenier's map for marginals of X: T_X_(fe)male
T_X_male = np.linalg.inv(sqrtm(X_male_cov)) @ sqrtm( sqrtm(X_male_cov) @ X_bar @ sqrtm(X_male_cov) ) @ np.linalg.inv(sqrtm(X_male_cov))
T_X_female = np.linalg.inv(sqrtm(X_female_cov)) @ sqrtm( sqrtm(X_female_cov) @ X_bar @ sqrtm(X_female_cov) ) @ np.linalg.inv(sqrtm(X_female_cov))
# wasserstein pseudo-barycenter for X separated in train/test categories: X_TrainFairList, X_TestFairList
X_TrainFairList = []
X_TestFairList = []
for i in range(0,len(TrainList)):
train = np.array(pd.get_dummies(TrainList[i][X_features]))
test = np.array(pd.get_dummies(TestList[i][X_features]))
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
train_new = np.random.rand(train.shape[0],train.shape[1])
test_new = np.random.rand(test.shape[0],test.shape[1])
train_new[gender_train_i == ' Male',:] = (train[gender_train_i == ' Male',:] - X_male_mean) @ T_X_male.T + X_mean
train_new[gender_train_i == ' Female',:] = (train[gender_train_i == ' Female',:] - X_female_mean) @ T_X_female.T + X_mean
test_new[gender_test_i == ' Male',:] = (test[gender_test_i == ' Male',:] - X_male_mean) @ T_X_male.T + X_mean
test_new[gender_test_i == ' Female',:] = (test[gender_test_i == ' Female',:] - X_female_mean) @ T_X_female.T + X_mean
X_TrainFairList.append(train_new)
X_TestFairList.append(test_new)
# ## Compute the Wasserstein Pseudo-barycenter for E(Y|X)
# In[3]:
# wasserstein pseudo-barycenter for X: X_fair
X_fair = np.concatenate([X_TrainFairList[0],X_TrainFairList[1],X_TrainFairList[2],X_TrainFairList[3],X_TrainFairList[4],X_TestFairList[0],X_TestFairList[1],X_TestFairList[2],X_TestFairList[3],X_TestFairList[4]])
# marginal (conditional) X_fair: X_fair_(fe)male
X_fair_male = X_fair[gender == ' Male',:]
X_fair_female = X_fair[gender == ' Female',:]
# marginal means for X_fair: X_fair_(fe)male_mean
X_fair_male_mean = np.average(X_fair_male, axis = 0)
X_fair_female_mean = np.average(X_fair_female, axis = 0)
# marginal covariance for X_fair: X_fair_(fe)male_cov
X_fair_male_cov = np.cov(X_fair_male.T)
X_fair_female_cov = np.cov(X_fair_female.T)
# cross-covariance between Y and X_fair: yX_fair_(fe)male_cov
yX_fair_male_cov = np.cov(Y_male.T, X_fair_male.T)[range(1,17),0]
yX_fair_female_cov = np.cov(Y_female.T, X_fair_female.T)[range(1,17),0]
# covariance of marginal E(Y|X) in Gaussian case: yoX_(fe)male_cov
# which is also the optimal linear estimate of the covariance of E(Y|X) for general (non-Gaussian) distributions
yoX_male_cov = yX_fair_male_cov@np.linalg.inv(X_fair_male_cov)@yX_fair_male_cov.T
yoX_female_cov = yX_fair_female_cov@np.linalg.inv(X_fair_female_cov)@yX_fair_female_cov.T
# algorithm 2, step 1: fixed-point iteration for the barycenter covariance of the dependent variable, with stopping criterion: change below 0.00000001
Y_bar = np.random.rand()
eps = 10
while eps > 0.00000001:
Y_new = ((len(X_male[:,0])/sample_size) * np.sqrt(np.sqrt(Y_bar)*yoX_male_cov*np.sqrt(Y_bar))) + ((len(X_female[:,0])/sample_size) * np.sqrt(np.sqrt(Y_bar)*yoX_female_cov*np.sqrt(Y_bar)))
eps = Y_bar - Y_new
Y_bar = Y_new
# algorithm 2, step 2: the corresponding Brenier's map for marginals of E(y|X): T_Y_(fe)male
T_Y_male = (1/np.sqrt(yoX_male_cov)) * np.sqrt( np.sqrt(yoX_male_cov) * Y_bar * np.sqrt(yoX_male_cov) ) * (1/np.sqrt(yoX_male_cov))
T_Y_female = (1/np.sqrt(yoX_female_cov)) * np.sqrt( np.sqrt(yoX_female_cov) * Y_bar * np.sqrt(yoX_female_cov) ) * (1/np.sqrt(yoX_female_cov))
# wasserstein pseudo-barycenter for Y separated in train/test categories: Y_TrainFairList, Y_TestFairList
Y_TrainFairList = []
Y_TestFairList = []
for i in range(0,len(TrainList)):
train = np.array(pd.get_dummies(TrainList[i][Y_features]))
test = np.array(pd.get_dummies(TestList[i][Y_features]))
train_new = np.random.rand(len(train.T[0,:]))
test_new = np.random.rand(len(test.T[0,:]))
gender_train_i = np.array(TrainList[i][Z_features]).T[0,:]
gender_test_i = np.array(TestList[i][Z_features]).T[0,:]
train_new[gender_train_i == ' Male'] = ((train[gender_train_i == ' Male'] - Y_male_mean) * T_Y_male.T + Y_mean).T[0,:]
train_new[gender_train_i == ' Female'] = ((train[gender_train_i == ' Female'] - Y_female_mean) * T_Y_female.T + Y_mean).T[0,:]
test_new[gender_test_i == ' Male'] = ((test[gender_test_i == ' Male'] - Y_male_mean) * T_Y_male.T + Y_mean).T[0,:]
test_new[gender_test_i == ' Female'] = ((test[gender_test_i == ' Female'] - Y_female_mean) * T_Y_female.T + Y_mean).T[0,:]
Y_TrainFairList.append(train_new)
Y_TestFairList.append(test_new)
# Algorithm 2, step 4: reshape the dependent pseudo-barycenter to binary variable for logit regression
fair_value = np.unique(Y_TrainFairList[0])
Y_prob = (fair_value - np.min(fair_value))/(np.max(fair_value) - np.min(fair_value))
for j in range(0,len(Y_TrainFairList)):
for i in range(0,len(fair_value)):
Y_TrainFairList[j][Y_TrainFairList[j] == fair_value[i]] = np.random.binomial(size = len(np.where(Y_TrainFairList[j] == fair_value[i])[0]),n = 1,p = Y_prob[i])
Y_TestFairList[j][Y_TestFairList[j] == fair_value[i]] = np.random.binomial(size = len(np.where(Y_TestFairList[j] == fair_value[i])[0]),n = 1,p = Y_prob[i])
# In[4]:
# random forest test for the fair representation of data (barycenter pair)
RFModelsAUC=[]
RFTestPreds=[]
test_disc = []
for i in range(0,len(TrainList)):
rf=RandomForestClassifier()
rf.fit(X_TrainFairList[i],Y_TrainFairList[i])
proba=rf.predict_proba(X_TestFairList[i])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
RFModelsAUC.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc.append(maxdisc)
RFres_Pseudobary = (RFModelsAUC, RFTestPreds)
RFDisc_Pseudobary = test_disc
RFModelsAUC, RFDisc_Pseudobary
# In[5]:
# logistic regression test for the fair representation of data (barycenter pair)
LRModelsAUC=[]
LRTestPreds=[]
test_disc = []
for i in range(0,len(TrainList)):
lr=LogisticRegression()
lr.fit(X_TrainFairList[i],Y_TrainFairList[i])
proba=lr.predict_proba(X_TestFairList[i])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
LRTestPreds.append(proba[:,1])
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc.append(maxdisc)
LRres_Pseudobary = (LRModelsAUC, LRTestPreds)
LRDisc_Pseudobary = test_disc
LRModelsAUC, LRDisc_Pseudobary
# ## Estimate the geodesic path from the E(Y|X_z) to the barycenter of the marginal conditional expectations
# 1. Compute the geodesic paths from X to X^dash and from Y to Y^dash
# 2. Use a diagonal argument to estimate the geodesic path from the original E(Y|X) to E(Y^dash|X^dash) on both train and test data sets: X_train/test_path_list, Y_train/test_path_list (a short illustrative sketch of the interpolation formula follows below)
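# Added illustrative sketch (not part of the original analysis): for an affine Brenier map
# T(x) = A x + b, the McCann interpolant used below is simply x_t = (1 - t) * x + t * T(x),
# so each observation moves along a straight line from x (t = 0) to T(x) (t = 1).
# The 1-D toy example below uses made-up numbers.
def _toy_map(v):
    return 2.0 * v + 1.0  # hypothetical affine map T(x) = 2x + 1
_x_toy = np.array([0.0, 1.0, 2.0])
for _t in (0.0, 0.5, 1.0):
    print(_t, (1 - _t) * _x_toy + _t * _toy_map(_x_toy))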
# In[6]:
X_train_path_list = []
X_test_path_list = []
Y_train_path_list = []
Y_test_path_list = []
T = np.linspace(0,1,50) # discretize time variable T
# Algorithm 1, step 3: estimate of the independent variable (X) geodesic path using McCann interpolation
for i in range(0,len(TrainList)):
X_train_path = []
X_test_path = []
train = np.array(pd.get_dummies(TrainList[i][X_features]))
test = np.array(pd.get_dummies(TestList[i][X_features]))
gender_train_i = np.delete(np.array(pd.get_dummies(TrainList[i][Z_features])),[1],axis = 1).T[0,:]
gender_test_i = np.delete(np.array(pd.get_dummies(TestList[i][Z_features])),[1],axis = 1).T[0,:]
for t in range(0,len(T)):
train_new = np.zeros(train.shape)
test_new = np.zeros(test.shape)
tempt_train = train_new
tempt_test = test_new
tempt_train[gender_train_i == 0,:] = (1-T[t])*train[gender_train_i == 0,:] + T[t]*((train[gender_train_i == 0,:] - X_male_mean) @ T_X_male.T + X_mean)
tempt_train[gender_train_i == 1,:] = (1-T[t])*train[gender_train_i == 1,:] + T[t]*((train[gender_train_i == 1,:] - X_female_mean) @ T_X_female.T + X_mean)
tempt_test[gender_test_i == 0,:] = (1-T[t])*test[gender_test_i == 0,:] + T[t]*((test[gender_test_i == 0,:] - X_male_mean) @ T_X_male.T + X_mean)
tempt_test[gender_test_i == 1,:] = (1-T[t])*test[gender_test_i == 1,:] + T[t]*((test[gender_test_i == 1,:] - X_female_mean) @ T_X_female.T + X_mean)
        # Algorithm 1, step 4: merge the corresponding (w.r.t. t) linear interpolation of the sensitive variable back into the McCann interpolation of the independent variable
X_train_path.append(np.concatenate((tempt_train, np.expand_dims(gender_train_i*(1-T[t]), axis=1)),axis = 1))
X_test_path.append(np.concatenate((tempt_test, np.expand_dims(gender_test_i*(1-T[t]), axis=1)),axis = 1))
X_train_path_list.append(X_train_path)
X_test_path_list.append(X_test_path)
# Algorithm 2, step 3: estimate of the dependent (Y) geodesic path using McCann interpolation
for i in range(0,len(TrainList)):
Y_train_path = []
Y_test_path = []
train = np.array(pd.get_dummies(TrainList[i][Y_features]))
test = np.array(pd.get_dummies(TestList[i][Y_features]))
gender_train_i = np.delete(np.array(pd.get_dummies(TrainList[i][Z_features])),[1],axis = 1).T[0,:]
gender_test_i = np.delete(np.array(pd.get_dummies(TestList[i][Z_features])),[1],axis = 1).T[0,:]
for t in range(0,len(T)):
train_new = np.random.rand(len(train.T[0,:]))
test_new = np.random.rand(len(test.T[0,:]))
tempt_train = train_new
tempt_test = test_new
tempt_train[gender_train_i == 0] = ((1 - T[t] + T[t]*T_Y_male)*train[gender_train_i == 0] + T[t]*(Y_mean - T_Y_male*Y_male_mean)).T[0,:]
tempt_train[gender_train_i == 1] = ((1 - T[t] + T[t]*T_Y_female)*train[gender_train_i == 1] + T[t]*(Y_mean - T_Y_female*Y_female_mean)).T[0,:]
tempt_test[gender_test_i == 0] = ((1 - T[t] + T[t]*T_Y_male)*test[gender_test_i == 0] + T[t]*(Y_mean - T_Y_male*Y_male_mean)).T[0,:]
tempt_test[gender_test_i == 1] = ((1 - T[t] + T[t]*T_Y_female)*test[gender_test_i == 1] + T[t]*(Y_mean - T_Y_female*Y_female_mean)).T[0,:]
Y_train_path.append(tempt_train)
Y_test_path.append(tempt_test)
Y_train_path_list.append(Y_train_path)
Y_test_path_list.append(Y_test_path)
# Algorithm 2, step 4: reshape the dependent pseudo-barycenter to binary variable for logit regression
for t in range(0,len(T)):
for i in range(0,len(TrainList)):
fair_value = np.unique(Y_train_path_list[i][t])
Y_prob = (fair_value - np.min(fair_value))/(np.max(fair_value) - np.min(fair_value))
for j in range(0,len(fair_value)):
Y_train_path_list[i][t][Y_train_path_list[i][t] == fair_value[j]] = np.random.binomial(size = len(np.where(Y_train_path_list[i][t] == fair_value[j])[0]),n = 1,p = Y_prob[j])
Y_test_path_list[i][t][Y_test_path_list[i][t] == fair_value[j]] = np.random.binomial(size = len(np.where(Y_test_path_list[i][t] == fair_value[j])[0]),n = 1,p = Y_prob[j])
# ## Performance Tests and Comparisons
# In[7]:
# ROC AUC of random forest trained via the fair data representation interpolation: RFModelsAUC_path_list
RFModelsAUC_path_list =[]
# classification discrimination (definition 6.1) of standard random forest trained via the fair data representation interpolation: RFDisc_path_list
RFDisc_path_list =[]
for i in range(0,len(TrainList)):
ModelsAUC_path = []
test_disc_path=[]
for t in range(0,len(T)):
rf=RandomForestClassifier()
rf.fit(X_train_path_list[i][t],Y_train_path_list[i][t])
proba=rf.predict_proba(X_test_path_list[i][t])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
ModelsAUC_path.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc_path.append(maxdisc)
RFModelsAUC_path_list.append(ModelsAUC_path)
RFDisc_path_list.append(test_disc_path)
# ROC AUC of logistic regression trained via the fair data representation interpolation: LRModelsAUC_path_list
LRModelsAUC_path_list =[]
# classification discrimination (definition 6.1) of logistic regression trained via the fair data representation interpolation: LRDisc_path_list
LRDisc_path_list =[]
for i in range(0,len(TrainList)):
ModelsAUC_path = []
test_disc_path=[]
for t in range(0,len(T)):
lr=LogisticRegression()
lr.fit(X_train_path_list[i][t],Y_train_path_list[i][t])
proba=lr.predict_proba(X_test_path_list[i][t])
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
ModelsAUC_path.append(testauc)
temp=TestList[i][Z_features+X_features+Y_features]
temp['pred']=proba[:,1]
mean = temp.groupby(Z_features)['pred'].mean()
v = mean.values
v = v.reshape(len(v),1)
ratio_df = pd.DataFrame(v/v.transpose(),index=mean.index,columns=mean.index )
ratio_df_arr=np.asarray(np.abs(1-ratio_df))
maxdisc=np.amax(ratio_df_arr)
test_disc_path.append(maxdisc)
LRModelsAUC_path_list.append(ModelsAUC_path)
LRDisc_path_list.append(test_disc_path)
# average auc and discrimination of the five fold cross validation: acc_RF, dis_RF, acc_LR, dis_LR
acc_RF = np.average(np.array(RFModelsAUC_path_list),axis = 0)
dis_RF = np.average(np.array(RFDisc_path_list),axis = 0)
acc_LR = np.average(np.array(LRModelsAUC_path_list),axis = 0)
dis_LR = np.average(np.array(LRDisc_path_list),axis = 0)
# ## Comparison Methods and Corresponding Numerical Implementations
# In[8]:
interploation_model = interp1d(dis_RF, acc_RF, kind = "linear")
# Plotting the Graph
X_RF=np.linspace(dis_RF.min(), dis_RF.max(), 500)
Y_RF=interploation_model(X_RF)
plt.plot(X_RF, Y_RF)
plt.title("Pareto Frontier for Random Forest on Adult")
plt.xlabel("Disparity")
plt.ylabel("AUC")
plt.show()
# In[9]:
interploation_model = interp1d(dis_LR, acc_LR, kind = "linear")
# Plotting the Graph
X_LR=np.linspace(dis_LR.min(), dis_LR.max(), 500)
Y_LR=interploation_model(X_LR)
plt.plot(X_LR, Y_LR)
plt.title("Pareto Frontier for Logistic Regression on Adult")
plt.xlabel("Disparity")
plt.ylabel("AUC")
plt.show()
# In[10]:
# define the trained logistic regression model from the paper by Calmon
def RunLRClassifier(TrainList,TestList,TrainNewList,TestNewList,Z_features,X_features,Y_features):
LRModelsAUC=[]
LRTestPreds=[]
for i in range(0,len(TrainNewList)):
dft = pd.get_dummies(TrainNewList[i][Z_features+X_features])
lr=LogisticRegression()
lr.fit(dft,TrainNewList[i][Y_features])
dft = pd.get_dummies(TestNewList[i][Z_features+X_features])
proba=lr.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
LRTestPreds.append(dft)
return LRModelsAUC,LRTestPreds
# define the logistic regression model without using the sensitive information
def RunLRWithoutDClassifier(TrainList,TestList,Z_features,X_features,Y_features):
LRModelsAUC=[]
LRTestPreds=[]
for i in range(0,len(TrainList)):
dft = pd.get_dummies(TrainList[i][X_features])
lr=LogisticRegression()
lr.fit(dft,TrainList[i][Y_features])
dft = pd.get_dummies(TestList[i][X_features])
proba=lr.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
LRTestPreds.append(dft)
return LRModelsAUC,LRTestPreds
# define the standard logistic regression model
def RunPlainLRClassifier(TrainList,TestList,Z_features,X_features,Y_features):
LRModelsAUC=[]
LRTestPreds=[]
for i in range(0,len(TrainList)):
dft = pd.get_dummies(TrainList[i][Z_features+X_features])
lr=LogisticRegression()
lr.fit(dft,TrainList[i][Y_features])
dft = pd.get_dummies(TestList[i][Z_features+X_features])
proba=lr.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
LRModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
LRTestPreds.append(dft)
return LRModelsAUC,LRTestPreds
# define the trained random forest model in the paper by Calmon
def RunRFClassifier(TrainList,TestList,TrainNewList,TestNewList,Z_features,X_features,Y_features):
RFModelsAUC=[]
RFTestPreds=[]
for i in range(0,len(TrainNewList)):
dft = pd.get_dummies(TrainNewList[i][Z_features+X_features])
#rf=RandomForestClassifier(n_estimators=30) # gives better AUC higher discrim
rf=RandomForestClassifier(max_depth=None, max_leaf_nodes=6, max_features=3) # gives slightly lesser AUC
#and lower discrim
rf.fit(dft,TrainNewList[i][Y_features])
dft = pd.get_dummies(TestNewList[i][Z_features+X_features])
proba=rf.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
RFModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
RFTestPreds.append(dft)
return RFModelsAUC,RFTestPreds
# define the random forest model without using the sensitive information
def RunRFWithoutDClassifier(TrainList,TestList,Z_features,X_features,Y_features):
RFModelsAUC=[]
RFTestPreds=[]
for i in range(0,len(TrainList)):
dft = pd.get_dummies(TrainList[i][X_features])
#rf=RandomForestClassifier(n_estimators=30)
rf=RandomForestClassifier(max_depth=None, max_leaf_nodes=6, max_features=3)
rf.fit(dft,TrainList[i][Y_features])
dft = pd.get_dummies(TestList[i][X_features])
proba=rf.predict_proba(dft)
ytrue=TestList[i][Y_features]
testauc=roc_auc_score(ytrue, proba[:, 1])
RFModelsAUC.append(testauc)
dft=TestList[i][Z_features+X_features+Y_features]
dft['pred']=proba[:,1]
RFTestPreds.append(dft)
return RFModelsAUC,RFTestPreds
# define the standard random forest model
def RunPlainRFClassifier(TrainList,TestList,Z_features,X_features,Y_features):
RFModelsAUC=[]
RFTestPreds=[]
for i in range(0,len(TrainList)):
dft = pd.get_dummies(TrainList[i][Z_features+X_features])
rf=RandomForestClassifier()
rf.fit(dft,TrainList[i][Y_features])
dft = | pd.get_dummies(TestList[i][Z_features+X_features]) | pandas.get_dummies |
# coding: utf-8
# # Resting state analysis
# In[8]:
import pickle
from pathlib import Path
import os
import mne
import numpy as np
import scipy.stats
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
get_ipython().magic('matplotlib inline')
except:
pass
import pandas as pd
from tqdm import tqdm as tqdm
# ## Take the raw data and frequency-transform it
#
# In this package I have already made scripts that effectively turn the CSV files into raw files using a generator function: `restraws()`. You can find that [here](data/processed/resting.py). It doesn't do anything other than: 1) load the data from CSV; 2) load the channel locations; 3) combine the two to make a neat [`Raw`](http://martinos.org/mne/dev/generated/mne.io.Raw.html) data with a proper montage, all zero-channels set as `bads`, and the subject's name in the info structure. The neat thing is that it does it _on each iteration_, so it loads the data one by one, and removes it from memory when the next one comes up.
#
# Here, I'm going to load that data, then average-reference it and do a time-frequency transform on it. The data is already effectively preprocessed. The time-frequency results are saved to data/interim/freqanalysis.
# In[ ]:
from data.processed.resting import restraws, restevents, nrest
for raw in restraws():
# output names
psdfname = (Path('.') / 'data' / 'interim' / 'freqanalysis' /
('rest-' + raw.info['subject_info'] + '.pickle'))
infofname = (Path('.') / 'data' / 'interim' / 'info' /
('rest-' + raw.info['subject_info'] + '.pickle'))
# save the info structure to file (incl bads)
with open(infofname, 'wb+') as f:
pickle.dump(raw.info, f)
# if it exists already, skip
if os.path.isfile(psdfname): continue
# try cropping it; this will fail if less than 200s; so skip
try:
raw.crop(tmin=0, tmax=200)
except ValueError:
continue
# average reference
raw.set_eeg_reference()
raw.apply_proj()
# do the time-frequency analysis
psd, freqs = mne.time_frequency.psd_multitaper(raw, fmin=1, fmax=30, n_jobs=4)
# save TFR and Info to pickle
with open(psdfname, 'wb+') as f:
pickle.dump((freqs, psd), f)
# ## Load the data back from pickle
#
# In `data/interim/freqanalysis` sits [another python script](data/interim/freqanalysis/__init__.py) that lets you load the data __back__ into memory via a generator. So here I do that, then fit a linear regression in semilog and in loglog space.
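# Added illustrative check (synthetic numbers, not part of the original analysis): for a pure
# power-law spectrum psd = f**(-2), the log-log fit recovers a slope of about -2, while the
# semilog fit (log-power against linear frequency) gives a range-dependent slope instead.
_f_demo = np.linspace(1, 30, 100)
_psd_demo = _f_demo ** -2.0
print(scipy.stats.linregress(np.log10(_f_demo), np.log10(_psd_demo)).slope)  # ~ -2.0
print(scipy.stats.linregress(_f_demo, np.log10(_psd_demo)).slope)            # range-dependent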
# In[6]:
from data.interim.freqanalysis import psds
import scipy.stats
alldata = []
for pid, freq, psd in psds():
# identify the right frequency band
idx = ((freq > 4) & (freq < 7)) | ((freq > 14) & (freq < 24))
# fit linear regression in semilog space
semilogfits = [scipy.stats.linregress(freq[idx], np.log10(psd.T[idx, sensor]))
for sensor in range(psd.shape[0])]
loglogfits = [scipy.stats.linregress(np.log10(freq[idx]), np.log10(psd.T[idx, sensor]))
for sensor in range(psd.shape[0])]
# append a dictionary that holds all the relevant info
datadict = {'EID': pid}
for name, fits in [('semilog', semilogfits), ('loglog', loglogfits)]:
datadict.update(
{name + 'slopes_individual' : [fit.slope for fit in fits],
name + 'intercept_individual' : [fit.intercept for fit in fits],
name + 'rval_individual' : [fit.rvalue for fit in fits],
name + 'slopes_mean' : np.mean([fit.slope for fit in fits]),
name + 'intercept_mean' : np.mean([fit.intercept for fit in fits]),
name + 'rval_mean' : np.mean([fit.rvalue for fit in fits])}
)
alldata.append(datadict)
# ## Load the phenotypic data & combine with data so far
# In[17]:
from data import phenotypes
phenotypes = phenotypes.set_index('EID')
alldf = | pd.DataFrame(alldata) | pandas.DataFrame |
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
import logging as __logging
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __araclar as __araclar, __dogrulama as __dogrulama
from seffaflik.elektrik.uretim import organizasyonlar as __organizasyonlar
__first_part_url = "market/"
def hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic=""):
"""
    Returns the bilateral contract supply/demand volume data for the given date range.
    Note: if an "organizasyon_eic" value is supplied, the hourly supply/demand volumes of that organization are returned.
    Parameters
    ------------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi     : end date in YYYY-MM-DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    Returns
    ----------------
    Supply/Demand Bilateral Contract Quantities (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_eic_dogrulama(baslangic_tarihi, bitis_tarihi, organizasyon_eic):
try:
particular_url = \
__first_part_url + "bilateral-contract-sell" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + organizasyon_eic
json = __make_requests(particular_url)
df_arz = __pd.DataFrame(json["body"]["bilateralContractSellList"])
particular_url = \
__first_part_url + "bilateral-contract-buy" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + organizasyon_eic
json = __make_requests(particular_url)
df_talep = __pd.DataFrame(json["body"]["bilateralContractBuyList"])
df = __araclar.__merge_ia_dfs_evenif_empty(df_arz, df_talep)
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df = df[["Tarih", "Saat", "Talep Miktarı", "Arz Miktarı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
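# Illustrative usage note (added): the dates below are arbitrary examples and a real call
# needs network access to the EPIAS transparency platform:
#
#     df = hacim(baslangic_tarihi="2021-01-01", bitis_tarihi="2021-01-07")
#     df.head()  # -> columns: Tarih, Saat, Talep Miktarı, Arz Miktarı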
def tum_organizasyonlar_hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), hacim_tipi="NET"):
"""
    Returns the hourly bilateral contract volume data of all organizations for the given date range and volume type.
    Parameters
    ------------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    hacim_tipi : volume type as a string ("NET", "ARZ" or "TALEP") (default: "NET")
    Returns
    -----------------
    Bilateral Contract Volume Data of All Organizations (Tarih, Saat, Hacim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
list_org = __organizasyonlar()[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
if hacim_tipi.lower() == "net":
list_df_unit = p.starmap(__organizasyonel_net_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "arz":
list_df_unit = p.starmap(__organizasyonel_arz_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "talep":
list_df_unit = p.starmap(__organizasyonel_talep_hacim, list_date_org_eic, chunksize=1)
else:
                __logging.error("Please enter a valid volume type: Net, Arz, Talep", exc_info=False)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_gorevli_tedarik_hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), hacim_tipi="NET"):
"""
    Returns the hourly bilateral contract volume data of the incumbent supply (görevli tedarik) companies for the given date range and volume type.
    Parameters
    ------------
    baslangic_tarihi : start date in YYYY-MM-DD format (default: today)
    bitis_tarihi : end date in YYYY-MM-DD format (default: today)
    hacim_tipi : volume type as a string ("NET", "ARZ" or "TALEP") (default: "NET")
    Returns
    -----------------
    Bilateral Contract Volume Data of the Incumbent Supply Companies (Tarih, Saat, Hacim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = __organizasyonlar()
org = org[(org["Adı"].str.contains("K1")) | (org["Adı"].str.contains("K2")) | (
org["Adı"].str.contains("K3"))].reset_index(drop=True)
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
if hacim_tipi.lower() == "net":
list_df_unit = p.starmap(__organizasyonel_net_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "arz":
list_df_unit = p.starmap(__organizasyonel_arz_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "talep":
list_df_unit = p.starmap(__organizasyonel_talep_hacim, list_date_org_eic, chunksize=1)
else:
                __logging.error("Please enter a valid volume type: Net, Arz, Talep", exc_info=False)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def __organizasyonel_net_hacim(baslangic_tarihi, bitis_tarihi, org):
"""
    Returns the hourly net bilateral contract volume data for the given date range and organization.
    Important Note
    ------------
    If no organization is given, the total market volume is returned.
    Parameters
    -----------
    baslangic_tarihi: start date in YYYY-MM-DD format
    bitis_tarihi : end date in YYYY-MM-DD format
    org : dict containing the organization's "EIC Kodu" (EIC code) and "Kısa Adı" (short name)
    Returns
    -----------------
    Net Bilateral Contract Quantity (MWh)
"""
try:
particular_url = \
__first_part_url + "bilateral-contract-sell" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + org["EIC Kodu"]
json = __make_requests(particular_url)
df_arz = | __pd.DataFrame(json["body"]["bilateralContractSellList"]) | pandas.DataFrame |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pandas as pd
try:
import talib
except:
    print('Please install TA-Lib before calling this function')
def CDL2CROWS(data):
res = talib.CDL2CROWS(data.open.values, data.high.values,
data.low.values, data.close.values)
return pd.DataFrame({'CDL2CROWS': res}, index=data.index)
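# Illustrative usage note (added): each CDL* wrapper expects a DataFrame with `open`, `high`,
# `low` and `close` columns (e.g. a QUANTAXIS daily bar frame) and returns a one-column
# DataFrame aligned on the same index, where 0 means "no pattern" and +/-100 flags a detected
# bearish/bullish pattern. For example (made-up numbers):
#
#     ohlc = pd.DataFrame({"open": [10, 11, 12], "high": [11, 12, 13],
#                          "low": [9, 10, 11], "close": [11, 12, 10]},
#                         index=pd.date_range("2021-01-01", periods=3))
#     CDL2CROWS(ohlc)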
def CDL3BLACKCROWS(data):
res = talib.CDL3BLACKCROWS(
data.open.values, data.high.values, data.low.values, data.close.values)
return pd.DataFrame({'CDL3BLACKCROWS': res}, index=data.index)
def CDL3INSIDE(data):
res = talib.CDL3INSIDE(
data.open.values, data.high.values, data.low.values, data.close.values)
return pd.DataFrame({'CDL3INSIDE': res}, index=data.index)
def CDL3LINESTRIKE(data):
res = talib.CDL3LINESTRIKE(
data.open.values, data.high.values, data.low.values, data.close.values)
return pd.DataFrame({'CDL3LINESTRIKE': res}, index=data.index)
def CDL3OUTSIDE(data):
res = talib.CDL3OUTSIDE(
data.open.values, data.high.values, data.low.values, data.close.values)
return pd.DataFrame({'CDL3OUTSIDE': res}, index=data.index)
def CDL3STARSINSOUTH(data):
res = talib.CDL3STARSINSOUTH(
data.open.values, data.high.values, data.low.values, data.close.values)
return pd.DataFrame({'CDL3STARSINSOUTH': res}, index=data.index)
def CDL3WHITESOLDIERS(data):
res = talib.CDL3WHITESOLDIERS(
data.open.values, data.high.values, data.low.values, data.close.values)
return | pd.DataFrame({'CDL3WHITESOLDIERS': res}, index=data.index) | pandas.DataFrame |
import pandas as pd
import bioframe
import pyranges as pr
import numpy as np
from io import StringIO
def bioframe_to_pyranges(df):
pydf = df.copy()
pydf.rename(
{"chrom": "Chromosome", "start": "Start", "end": "End"},
axis="columns",
inplace=True,
)
return pr.PyRanges(pydf)
def pyranges_to_bioframe(pydf):
df = pydf.df
df.rename(
{"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
axis="columns",
inplace=True,
)
return df
def pyranges_overlap_to_bioframe(pydf):
## convert the df output by pyranges join into a bioframe-compatible format
df = pydf.df.copy()
df.rename(
{
"Chromosome": "chrom_1",
"Start": "start_1",
"End": "end_1",
"Start_b": "start_2",
"End_b": "end_2",
},
axis="columns",
inplace=True,
)
df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
df["chrom_2"] = df["chrom_1"].values
return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
def test_expand():
fake_bioframe = pd.DataFrame(
{"chrom": ["chr1", "chr1", "chr2"], "start": [1, 50, 100], "end": [5, 55, 200]}
)
fake_chromsizes = {"chr1": 60, "chr2": 300}
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe.copy(), expand_bp, fake_chromsizes)
print(fake_expanded)
assert fake_expanded.iloc[0].start == 0 # don't expand below zero
assert (
fake_expanded.iloc[1].end == fake_chromsizes["chr1"]
) # don't expand above chromsize
assert (
fake_expanded.iloc[2].end == fake_bioframe.iloc[2].end + expand_bp
) # expand end normally
assert (
fake_expanded.iloc[2].start == fake_bioframe.iloc[2].start - expand_bp
) # expand start normally
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
p1 = bioframe_to_pyranges(df1)
p2 = bioframe_to_pyranges(df2)
pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
]
bb = bioframe.overlap(df1, df2, how="inner")[
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
]
pp = pp.sort_values(
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
ignore_index=True)
bb = bb.sort_values(
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
ignore_index=True)
pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_2"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum( | pd.isna(b["index_2"].values) | pandas.isna |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Source of the original code: https://nigimitama.hatenablog.jp/entry/2020/01/25/110921
# Version extended to handle DataFrames whose index is not a consecutive range of integers
def slice_df(df: pd.DataFrame, size: int) -> list:
    """Slice a pandas.DataFrame into chunks of `size` rows and return them as a list."""
previous_index = list(df.index)
df = df.reset_index(drop=True)
n = df.shape[0]
list_indices = [(i, i+size) for i in range(0, n, size)]
df_indices = [(i, i+size-1) for i in range(0, n, size)]
sliced_dfs = []
for i in range(len(df_indices)):
begin_i, end_i = df_indices[i][0], df_indices[i][1]
begin_l, end_l = list_indices[i][0], list_indices[i][1]
df_i = df.loc[begin_i:end_i, :]
df_i.index = previous_index[begin_l:end_l]
sliced_dfs += [df_i]
return sliced_dfs
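# Quick illustrative check of slice_df (synthetic data, independent of the workbook
# processing below):
_demo_df = pd.DataFrame({"a": range(7)}, index=list("ABCDEFG"))
for _part in slice_df(_demo_df, size=3):
    print(_part.index.tolist())  # ['A', 'B', 'C'], then ['D', 'E', 'F'], then ['G']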
excelFilePath = ""
df_sheet_all = pd.read_excel(excelFilePath, sheet_name=None, index_col=0)
bk = pd.ExcelFile(excelFilePath)
for sheetNum in range(0,len(bk.sheet_names)-2):
    ## turn the selected sheet into the working data
    df_sheet_all[bk.sheet_names[sheetNum]] = df_sheet_all[bk.sheet_names[sheetNum]].fillna(99) ## replace NaN with 99
    df_sheet_all[bk.sheet_names[sheetNum]].columns = df_sheet_all[bk.sheet_names[sheetNum]].iloc[13] ## use row 13 as the column names
school = bk.sheet_names[sheetNum]
yyyy = df_sheet_all[bk.sheet_names[sheetNum]].iat[6, 4]
mm = df_sheet_all[bk.sheet_names[sheetNum]].iat[7, 4]
period = df_sheet_all[bk.sheet_names[sheetNum]].iat[8, 4]
grade = df_sheet_all[bk.sheet_names[sheetNum]].iat[9, 4]
dfTmp = df_sheet_all[bk.sheet_names[sheetNum]].drop(df_sheet_all[bk.sheet_names[sheetNum]].index[range(0,14)])
dfTmp = dfTmp.drop(99,axis=1)
dfTmp.insert(0,'実施月' ,mm)
dfTmp.insert(0,'位置付け' ,period)
dfTmp.insert(0,'実施対象' ,grade)
dfTmp.insert(0,'実施年度' ,yyyy)
dfTmp.insert(0,'学校名' ,school)
    print(school + " rows before dropping duplicates: " + str(len(dfTmp)))
    print(school + " rows after dropping duplicates: " + str(len(dfTmp.drop_duplicates())))
if sheetNum == 0:
dfAll = dfTmp.copy()
else:
dfAll = pd.concat([dfAll,dfTmp])
print("----")
dfAll = dfAll[dfAll['実施年度'] != 99]
# rename the columns and grade the answers
ansList = list(dfAll.columns.values[10:])
colList = dfAll.columns.values
qList = list(range(1,len(ansList)+1))
colList[10:]=qList
colList[9] = "合計点"
dfAll.columns = colList
dfbin = dfAll.copy()
# compute the total score with pandas
for j in range(len(dfAll)):
count = 0
for i in range(len(ansList)):
if(dfAll.iat[j,10+i] == ansList[i]):
count = count + 1
dfbin.iat[j,10+i] = 1
else:
dfbin.iat[j,10+i] = 0
dfAll.iat[j,9] = count
dfbin.iat[j,9] = count
# dfAll: all data (raw answers)
# dfbin: data coded as 0/1 (correct/incorrect)
# sort by total score in descending order
dfAll_sort = dfAll[9:].sort_values("合計点" , ascending=False)
# split into 5 groups
grp = 5
size = int( dfAll_sort.shape[0] / grp ) +1
data_slice = slice_df(dfAll_sort , size = size)
# collect the results into an array
KT = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pickle as p
import json
import pandas as pd
import os
from flask import Flask, request, redirect, url_for, flash, jsonify
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello, World!"
@app.route('/api/predict', methods=['POST'])
def makecalc():
data = request.get_json()
# print(data)
# prediction = np.array2string(model.predict(data))
data_json=json.loads(data)
df= | pd.DataFrame(data_json) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pdb
import numba
import six
import pandas as pd
import numpy as np
import inspect
import datetime
from sklearn import preprocessing
from numpy import log
from alphax.singleton import Singleton
# rolling corr of two pandas dataframes
def rolling_corr(x, y, win):
corr_df = pd.DataFrame(data=np.NaN, index=x.index, columns=x.columns)
for begin, end in zip(x.index[:-win + 1], x.index[win - 1:]):
corr_df.loc[end] = x.loc[begin:end].corrwith(y.loc[begin:end])
return corr_df
# rolling cov of two pandas dataframes
def rolling_cov(x, y, win):
cov_df = pd.DataFrame(data=np.NaN, index=x.index, columns=x.columns)
for begin, end in zip(x.index[:-win + 1], x.index[win - 1:]):
x_std = x.loc[begin:end].std()
y_std = y.loc[begin:end].std()
cov_df.loc[end] = x.loc[begin:end].corrwith(y.loc[begin:end]) * x_std * y_std
return cov_df
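if __name__ == "__main__":
    # Illustrative check (synthetic data; only runs when this module is executed directly):
    # the first win-1 rows are NaN, the remaining rows hold the column-wise rolling
    # correlation between x and y over the trailing window.
    _idx = pd.date_range("2021-01-01", periods=6)
    _x = pd.DataFrame(np.random.randn(6, 2), index=_idx, columns=["a", "b"])
    _y = pd.DataFrame(np.random.randn(6, 2), index=_idx, columns=["a", "b"])
    print(rolling_corr(_x, _y, win=3))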
# rolling rank of a pandas dataframe
def rolling_rank(df, win):
rank_df = pd.DataFrame(data=np.NaN, index=df.index, columns=df.columns)
for begin, end in zip(df.index[:-win + 1], df.index[win - 1:]):
rank_df.loc[end] = df.loc[begin:end].rank(axis=0, pct=True).iloc[-1]
return rank_df
# rolling dot of a pandas dataframe
def rolling_dot(df, x, win):
dot_df = pd.DataFrame(data=np.NaN, index=df.index, columns=df.columns)
for begin, end in zip(df.index[:-win + 1], df.index[win - 1:]):
# dot_df.loc[end] = x.dot(df.loc[begin:end])
dot_df.loc[end] = np.dot(x, df.loc[begin:end].values)
return dot_df
# rolling regression residual
def rolling_regresi(y, x, win):
resi_df = | pd.DataFrame(data=np.NaN, index=y.index, columns=y.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 11:44:07 2021
@author: <NAME>
"""
from openpyxl import load_workbook
import pandas as pd
if __name__ == '__main__':
'''
writer = pd.ExcelWriter("./jpeg_result.xlsx",engine="openpyxl")
wb = load_workbook(writer.path)
writer.book = wb
df = pd.DataFrame({"a":[0],'v':[2]})
df.to_excel(writer,sheet_name="sad")
writer.save()
writer.close()
'''
writer = | pd.ExcelWriter("./jpeg_result.xlsx",engine='openpyxl') | pandas.ExcelWriter |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = | concat([df, df], keys=[1, 2], names=["level2"]) | pandas.concat |
# Author: <NAME>, <NAME>, <NAME>
# Date: 2020/11/27
"""Create transformed train and test files .
Usage: src/preprocess.py <input_file> <input_file1> <output_file> <output_file1>
Options:
<input_file> Path (including filename and file extension) to train file
<input_file1> Path (including filename and file extension) to test file
<output_file> Path (including filename and file extension) to transformed train file
<output_file1> Path (including filename and file extension) to transformed test file
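Example (hypothetical paths):
    python src/preprocess.py data/processed/train.csv data/processed/test.csv data/processed/train_transformed.csv data/processed/test_transformed.csv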
"""
from docopt import docopt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor, make_column_transformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
opt = docopt(__doc__)
def main(input_file, input_file1, output_file,output_file1):
# read train_df.csv
train = pd.read_csv(input_file)
test = pd.read_csv(input_file1)
#create split the train_df
X_train, y_train = train.drop(columns=["quality_level"]), train["quality_level"]
X_test, y_test = test.drop(columns=["quality_level"]), test["quality_level"]
# categorize the features
numeric_features = ["fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar", "chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density", "pH", "sulphates", "alcohol"]
binary_features = ['wine_type']
target = ['quality_level']
# make preprocessor
preprocessor = make_column_transformer(
(StandardScaler(), numeric_features),
(OneHotEncoder(handle_unknown="error", drop="if_binary"), binary_features))
# transform data
preprocessor.fit(X_train)
col = numeric_features + list(preprocessor.named_transformers_["onehotencoder"].get_feature_names())
X_train_pp = preprocessor.transform(X_train)
X_test_pp = preprocessor.transform(X_test)
# create transformed test and train data
transformed_train = pd.DataFrame(X_train_pp, index = X_train.index , columns = col)
train_pp = | pd.concat([transformed_train, y_train], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import sompy
from sompy.sompy import SOMFactory
from sompy.visualization.mapview import View2D
from sompy.visualization.bmuhits import BmuHitsView
from sompy.visualization.hitmap import HitMapView
from dimred_clustering import DataPreprocess
from sklearn.preprocessing import RobustScaler, StandardScaler
from src.models.dynamic_time_warping import Preprocessing
def knn_elbow(df, k_range=20, plot=False):
from sklearn.cluster import KMeans
scores = {}
for i in range(1, k_range+1):
kmeans = KMeans(n_clusters=i)
clusters_fit = kmeans.fit_predict(df)
scores[i] = kmeans.inertia_
if plot == True:
| pd.DataFrame(scores, index=['score']) | pandas.DataFrame |
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from scipy.stats import chi2
from sklearn.cluster import KMeans
def calc_IV(data, var_name, var_name_target):
"""
    Compute the WOE and IV values of each bin.
    :param data: DataFrame, input data
    :param var_name: str, the (already binned) variable
    :param var_name_target: str, the target/label variable
    :return: the WOE and IV values of each bin
    Formula: WOE_i = ln{(sum_i / sum_T) / [(size_i - sum_i) / (size_T - sum_T)]}
    Formula: IV_i = [sum_i / sum_T - (size_i - sum_i) / (size_T - sum_T)] * WOE_i
"""
    # proportions of good/bad samples within the whole sample
count = pd.crosstab(data[var_name], data[var_name_target])
try:
# good = (count[count.columns[count.columns.values > 0]].sum(axis=1) / count[
# count.columns[count.columns.values > 0]].values.sum()).values # ???
good = (count[1] / count[1].sum()).values
bad = (count[0] / count[0].sum()).values
except Exception:
        exit('Please make sure the label variable (y) takes values in, and only in, [0, 1]')
woe = np.log(good / bad)
if 0 in bad:
ind = np.where(bad == 0)
woe[ind] = 0
        # print('The number of negative samples in group %s is 0!!!' % ind)
if 0 in good:
ind = np.where(good == 0)
woe[ind] = 0
        # print('The number of positive samples in group %s is 0!!!' % ind)
iv = (good - bad) * woe
return woe, iv
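if __name__ == "__main__":
    # Minimal worked example (synthetic data, for illustration only): 'grade' plays the role
    # of an already-binned feature and 'y' is the binary target taking values in {0, 1}.
    _demo = pd.DataFrame({"grade": ["A", "A", "A", "B", "B", "B", "B", "B"],
                          "y":     [1,   1,   0,   1,   0,   0,   0,   0]})
    _woe, _iv = calc_IV(_demo, "grade", "y")
    print(_woe)       # one WOE value per bin (ordered like the crosstab index)
    print(_iv.sum())  # total IV of the binned feature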
def group_to_col(data, group, labels):
"""
    Map the grouping information of a feature back onto the original data column.
    :param data: DataFrame, the original data column
    :param group: list, grouping information
    :param labels: list, group labels
:return:
"""
    assert len(group) == len(labels), "The number of groups and the number of group labels do not match!"
mapper = {ele: labels[i] for i, k in enumerate(group) for ele in k}
data = data.map(mapper)
return data
def unique_nan(data):  # when missing values are present, unique() keeps NaN while crosstab and friends drop it, so missing values are consistently excluded here
    return np.array(list(filter(lambda ele: ele == ele, data.unique())))  # memory & speed optimization: equivalent to dropna
# return np.array(data.dropna().unique())
class Discretization(object):
"""
离散化基类
"""
def __init__(self, max_interval=6, feature_type=0):
"""
初始化参数
:param max_interval: int 最大分箱数量
:param feature_type: bool 待分箱变量的类型(0:连续 1:离散)
"""
self.max_interval = max_interval
self.feature_type = feature_type
def dsct_pipeline(self, data, var_name, var_name_target=None):
"""
        Discretization pipeline.
        :param data: DataFrame, raw input data
        :param var_name: str, variable to be discretized
        :param var_name_target: target variable
        :return: ordered list of cut points, or list of value groups
"""
        assert var_name in data.columns, "The data does not contain variable %s, please check the input" % (var_name)
if len(unique_nan(data[var_name])) <= self.max_interval:
            # print('warning: the requested number of bins for variable %s is greater than or equal to its number of distinct values, no binning is needed.\nValues: %s' % (var_name, unique_nan(data[var_name])))
group = self.processing_when_not_binning(data, var_name, var_name_target)
group = self.postprocessing(group)
else:
group = self.dsct_general_method(data, var_name, var_name_target)
        # print("Binning finished!")
return group
def processing_when_not_binning(self, data, var_name, var_name_target=None):
        group = unique_nan(data[var_name]).reshape(-1, 1).tolist()  # grouping information: one group per distinct value
return group
def postprocessing(self, group):
"""
后处理:根据feature_type修改返回的group样式(feature_type=0:返回分割点列表;feature_type=1:返回分箱成员列表)
:param group:
:return:
"""
group = [sorted(ele) for ele in group]
if not self.feature_type:
group.sort()
group = self.group_to_cutpoints(group)
return group
def cut_points_expand(self, cut_points, precision=3):
"""
        Expand the boundary cut points so that the minimum and maximum values are included.
        :param cut_points: list, cut points
        :param precision:
        :return: list, the expanded cut points
"""
cut_points[0] = cut_points[0] - 0.001 if cut_points[0] == 0 else cut_points[0] - abs(
            cut_points[0]) * 0.001  # include the minimum value
cut_points[-1] = cut_points[-1] + 0.001 if cut_points[-1] == 0 else cut_points[-1] + abs(
            cut_points[-1]) * 0.001  # include the maximum value
        ## keeping only `precision` decimals may make some cut_points coincide
        # cut_points[0] = floor(cut_points[0] * pow(10, precision)) / pow(10, precision)  # include the minimum value
        # cut_points[1:-1] = [round(ele, precision) for ele in cut_points[1:-1]]
        # cut_points[-1] = ceil(cut_points[-1] * pow(10, precision)) / pow(10, precision)  # include the maximum value
return cut_points
def group_to_cutpoints(self, group):
"""
        Convert group information into cut points (only used for continuous variables).
        :param group: list, grouping information
        :return: list, the cut points of the groups (length = number of groups + 1)
"""
cut_points = [group[0][0]] + [ele[-1] for ele in group]
cut_points = self.cut_points_expand(cut_points)
return cut_points
def encode_by_mapdict(self, srs, map_dict):
if srs in map_dict.keys():
return map_dict[srs]
else:
return srs
class equalWide(Discretization):
"""
等宽分箱
"""
def dsct_general_method(self, data, var_name, var_name_target=None):
var_name_af = var_name + '_BIN'
if self.feature_type:
# 1. 统计离散特征各取值的正例样本占比
data[var_name_af] = 0
data.loc[data[var_name_target] > 0, var_name_af] = 1
data[var_name_af] = data.groupby(var_name)[var_name_af].transform('mean')
else:
data[var_name_af] = data[var_name].copy()
data[var_name_af], group = pd.cut(data[var_name_af], bins=self.max_interval, retbins=True,
duplicates='drop', include_lowest=True, labels=False)
group = group.tolist()
if len(group) - 1 != self.max_interval:
pass
# print('warning:分箱后,%s的箱体个数%s与您输入的箱体数量%s不符,可能是变量%s的相同取值过多,导致样本分布不均衡,请检查数据集。' % (
# var_name, len(group) - 1, self.max_interval, var_name))
group = self.cut_points_expand(group)
if self.feature_type:
group = [list(set(data[var_name][data[var_name_af] == ele])) for ele in unique_nan(data[var_name_af])]
return group
class equalFreq(Discretization):
"""
等频分箱
"""
def dsct_general_method(self, data, var_name, var_name_target=None):
var_name_af = var_name + '_BIN'
if self.feature_type:
# 1. 统计离散特征各取值的正例样本占比
data[var_name_af] = 0
data.loc[data[var_name_target] > 0, var_name_af] = 1
data[var_name_af] = data.groupby(var_name)[var_name_af].transform('mean')
else:
data[var_name_af] = data[var_name].copy()
data[var_name_af], group = pd.qcut(data[var_name_af], q=self.max_interval, retbins=True, precision=3,
duplicates='drop', labels=False)
group = group.tolist()
if len(group) - 1 != self.max_interval:
pass
# print('warning:分箱后,%s的箱体个数%s与您输入的箱体数量%s不符,可能是变量%s的相同取值过多,导致样本分布不均衡,请检查数据集。' % (
# var_name, len(group) - 1, self.max_interval, var_name))
group = self.cut_points_expand(group)
if self.feature_type:
group = [list(set(data[var_name][data[var_name_af] == ele])) for ele in
unique_nan(data[var_name_af])]
return group
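# --- Illustrative usage sketch (added example, not part of the original module) ---
# Compares equal-width and equal-frequency binning on synthetic data; .copy() is used
# because dsct_general_method adds a temporary '<var>_BIN' column to the input frame.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _bin_demo = pd.DataFrame({'x': _rng.normal(size=200)})
    print('Equal-width cut points:',
          equalWide(max_interval=5, feature_type=0).dsct_pipeline(_bin_demo.copy(), 'x'))
    print('Equal-frequency cut points:',
          equalFreq(max_interval=5, feature_type=0).dsct_pipeline(_bin_demo.copy(), 'x'))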
class SuperDiscretization(Discretization):
"""
监督离散化基类
"""
def dsct_pipeline(self, data, var_name, var_name_target):
"""
离散化处理流程
:param data: DataFrame 输入数据
:param var_name: str 待离散化变量
        :param var_name_target: str 标签变量(y)
:return:
"""
assert var_name_target in data.columns, "数据中不包含类别变量%s,请检查数据!" % (var_name_target)
group = super(SuperDiscretization, self).dsct_pipeline(data, var_name, var_name_target)
return group
def dsct_general_method(self, data, var_name, var_name_target):
"""
离散化通用功能
"""
# 1. 初始化:将每个值视为一个箱体 & 统计各取值的正负样本分布并排序
count, var_type = self.dsct_init(data, var_name, var_name_target, self.feature_type)
group = self.group_init(count) # 分组信息
# print("分箱初始化完成!")
# 2. 分箱主体
group = self.dsct(count, group, self.max_interval)
# print("分箱主体功能完成")
# 3. 后处理
group = self.postprocessing(group)
return group
def dsct_init(self, data, var_name_bf, var_name_target, feature_type):
"""
特征离散化节点初始化:统计各取值的正负样本分布[正例样本个数,负例样本个数]
:param data: DataFrame 输入数据
:param var_name_bf: str 待分箱变量
:param var_name_target: str 标签变量(y)
:param feature_type: 特征类型:0(连续) 1(离散)
:return: DataFrame 排好序的各组中正负样本分布 count
"""
# 统计待离散化变量的取值类型(string or digits)
data_type = data[var_name_bf].apply(lambda x: type(x)).unique()
var_type = True if str in data_type else False # 实际取值的类型:false(数字) true(字符)
# 是否需要根据正例样本比重编码,True:需要,False:不需要
# 0(连续) 1(离散)
# false(数字) 0 0(离散有序)
# true(字符) × 1(离散无序)
if feature_type == var_type:
ratio_indicator = var_type
elif feature_type == 1:
ratio_indicator = 0
# print("特征%s为离散有序数据,按照取值大小排序!" % (var_name_bf))
elif feature_type == 0:
exit(code="特征%s的类型为连续型,与其实际取值(%s)型不一致,请重新定义特征类型!!!" % (var_name_bf, data_type))
# 统计各分箱(group)内正负样本分布[累计样本个数,正例样本个数,负例样本个数]
count = pd.crosstab(data[var_name_bf], data[var_name_target])
total = count.sum(axis=1)
# 排序:离散变量按照pos_ratio排序,连续变量按照index排序
if ratio_indicator:
count['pos_ratio'] = count[1] * 1.0 / total # 计算正例比例
count = count.sort_values('pos_ratio') # 离散变量按照pos_ratio排序
count = count.drop(columns=['pos_ratio'])
else:
count = count.sort_index() # 连续变量按照index排序
return count, ratio_indicator
class clusterDsct(Discretization):
"""
聚类分箱:仅针对离散变量
先对var_name变量中的各个取值,计算正例样本占该取值总样本的比例(ratio),然后根据ratio值聚类分组,最后计算WOE和IV值,用于评价该分组的合理性
"""
def dsct_general_method(self, data, var_name, var_name_target):
if self.feature_type:
# 1. 初始化:将每个值视为一个箱体 & 统计各取值的正负样本分布并排序
count = pd.crosstab(data[var_name], data[var_name_target])
group = np.array(count.index)
# 2. 聚类分组
ratio = (count.iloc[:, count.columns.values > 0].sum(axis=1) * 1.0 / count.sum(
axis=1)).values # 正例样本占该取值总样本比值
model = KMeans(n_clusters=self.max_interval, init='k-means++')
label_pred = model.fit_predict(ratio.reshape(-1, 1))
# 3. 更新分组信息
group_update = [list(group[label_pred == i]) for i in range(label_pred.max() + 1)]
group = [tmp for tmp in group_update if tmp]
else:
raise ValueError("聚类分箱暂不支持连续特征!")
return group
class DependencyDsct(SuperDiscretization):
"""
基于卡方的离散化方法
"""
    def __init__(self, max_interval, feature_type, sig_level=0.05, n_chi2_dsct_thresh=100):
        super(DependencyDsct, self).__init__(max_interval, feature_type)
        self.sig_level = sig_level
self.n_chi2_dsct_thresh = n_chi2_dsct_thresh
def dsct_general_method(self, data, var_name, var_name_target):
"""
离散化处理流程
"""
# 类别数太多时
status = 0
if len(unique_nan(data[var_name])) > self.n_chi2_dsct_thresh:
# 粗分组
status = 1
data_bf, var_name_bf, group_bf = self.data_split(data, var_name, var_name_target, self.n_chi2_dsct_thresh,
self.feature_type)
else:
data_bf = data
var_name_bf = var_name
# 1. 初始化:将每个值视为一个箱体 & 统计各取值的正负样本分布并排序
count, var_type = self.dsct_init(data_bf, var_name_bf, var_name_target, self.feature_type)
group = self.group_init(count)
# print("分组初始化完成!")
# 2. 分箱主体
group = self.dsct(count, group, self.max_interval)
# print("分箱主体功能完成!")
# 映射至粗分组区间
if status:
if not self.feature_type:
group = [(group_bf[list(map(int, np.array(ele) + 1))]).tolist() for ele in group]
group[0].append(group_bf[0])
else:
group = [sum(np.array(group_bf)[list(map(int, np.array(ele)))], []) for ele in group]
# 后处理
group = self.postprocessing(group)
return group
def dsct(self, count, group, max_interval):
"""
离散化主体方法
:param count: DataFrame 待分箱变量的分布统计
:param max_interval: int 最大分箱数量
:return: 分组信息(group)
"""
self.deg_freedom = len(count.columns) - 1 # 自由度(degree of freedom)= y类别数-1
self.chi2_threshold = chi2.ppf(1 - self.sig_level, self.deg_freedom) # 卡方阈值
return group
def group_init(self, count):
# 获取初始分组
group = np.array(count.index).reshape(-1, 1).tolist() # 分组信息
return group
def calc_chi2(self, count, group1, group2):
"""
根据分组信息(group)计算各分组的卡方值
:param count: DataFrame 待分箱变量各取值的正负样本数
:param group1: list 单个分组信息
:param group2: list 单个分组信息
:return: 该分组的卡方值
"""
count_intv1 = count.loc[count.index.isin(group1)].sum(axis=0).values
count_intv2 = count.loc[count.index.isin(group2)].sum(axis=0).values
count_intv = np.vstack((count_intv1, count_intv2))
# 计算四联表
row_sum = count_intv.sum(axis=1)
col_sum = count_intv.sum(axis=0)
total_sum = count_intv.sum()
# 计算期望样本数
count_exp = np.ones(count_intv.shape) * col_sum / total_sum
count_exp = (count_exp.T * row_sum).T
# 计算卡方值
chi2 = (count_intv - count_exp) ** 2 / count_exp
chi2[count_exp == 0] = 0
return chi2.sum()
def merge_adjacent_intervals(self, count, chi2_list, group):
"""
根据卡方值合并卡方值最小的相邻分组并更新卡方值
:param count: DataFrame 待分箱变量的分布统计
:param chi2_list: list 每个分组的卡方值
:param group: list 分组信息
:return: 合并后的分组信息及卡方值
"""
min_idx = chi2_list.index(min(chi2_list))
# 根据卡方值合并卡方值最小的相邻分组
group[min_idx] = group[min_idx] + group[min_idx + 1]
group.remove(group[min_idx + 1])
# 更新卡方值
if min_idx == 0:
chi2_list.pop(min_idx)
chi2_list[min_idx] = self.calc_chi2(count, group[min_idx], group[min_idx + 1])
elif min_idx == len(group) - 1:
chi2_list[min_idx - 1] = self.calc_chi2(count, group[min_idx - 1], group[min_idx])
chi2_list.pop(min_idx)
else:
chi2_list[min_idx - 1] = self.calc_chi2(count, group[min_idx - 1], group[min_idx])
chi2_list.pop(min_idx)
chi2_list[min_idx] = self.calc_chi2(count, group[min_idx], group[min_idx + 1])
return chi2_list, group
def update_group_by_chi2(self, count, group, idx):
if idx == 0:
merge_idx = idx + 1
elif idx == len(group) - 1:
merge_idx = idx - 1
else:
# 根据卡方值合并卡方值最小的相邻分组
merge_idx = idx + 1 if self.calc_chi2(count, group[idx - 1], group[idx]) > self.calc_chi2(count, group[idx],
group[
idx + 1]) else idx - 1
group[idx] = group[idx] + group[merge_idx]
group.remove(group[merge_idx])
return group
def guarantee_completeness(self, count, group):
"""
检查每个箱体中是否都有正负样本。如果没有,则需要跟相邻的箱体合并,直至每个箱体都包含正负样本
:param count: DataFrame 待分箱变量的分布统计
:param group: list 分组信息
:return: list 分组信息
"""
while True:
# 计算pos_ratio
count_update = [count[count.index.isin(ele)].sum(axis=0).tolist() for ele in group]
count_update = pd.DataFrame(count_update, columns=count.columns)
count_update['pos_ratio'] = count_update[0] / count_update.sum(axis=1)
# 合并分组
            if min(count_update['pos_ratio']) == 0 or max(count_update['pos_ratio']) == 1:
idx = count_update[count_update['pos_ratio'].isin([0, 1])].index[0]
group = self.update_group_by_chi2(count_update, group, idx)
else:
break
return group
def data_split(self, data, var_name, var_name_target, n_chi2_dsct_thresh, feature_type):
# 粗分组
var_name_bf = var_name + '_coarse'
if not feature_type: # 连续型
# print("特征%s的取值数目%s超过100个,先采用等频分箱将其粗分为%s组,然后采用卡方分箱" % (
# var_name, len(unique_nan(data[var_name])), n_chi2_dsct_thresh)) # 等频分箱
data[var_name_bf], cut_points = pd.qcut(data[var_name], q=n_chi2_dsct_thresh, labels=False,
duplicates='drop', retbins=True) # 等频分箱
return data, var_name_bf, cut_points
else: # 离散型
# print("特征%s的取值数目%s超过100个,先采用聚类分箱将其粗分为%s组,然后采用卡方分箱" % (
# var_name, len(unique_nan(data[var_name])), n_chi2_dsct_thresh)) # 等频分箱
group_bf = clusterDsct(n_chi2_dsct_thresh, 1).dsct_pipeline(data[[var_name, var_name_target]], var_name,
var_name_target) # 聚类分箱
group_bf = [ele for ele in group_bf if ele != []]
data[var_name_bf] = group_to_col(data[var_name], group_bf, range(len(group_bf)))
return data, var_name_bf, group_bf
class chiMerge(DependencyDsct):
def dsct(self, count, group, max_interval):
group = super(chiMerge, self).dsct(count, group, max_interval)
# 2. 计算相邻分组的卡方值
chi2_list = [self.calc_chi2(count, group[idx], group[idx + 1]) for idx in range(len(group) - 1)]
# 3. 合并相似分组并更新卡方值
while 1:
# if min(chi2_list) >= self.chi2_threshold:
# # print("最小卡方值%.3f大于卡方阈值%.3f,分箱合并结束!!!" % (min(chi2_list), chi2_threshold))
# break
if len(group) <= max_interval:
# print("分组长度%s等于指定分组数%s" % (len(group), max_interval))
break
chi2_list, group = self.merge_adjacent_intervals(count, chi2_list, group)
# # print(chi2_list)
return group
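# --- Illustrative usage sketch (added example, not part of the original module) ---
# Runs chiMerge on a synthetic continuous feature whose positive rate rises with x,
# merging the 20 raw values down to max_interval bins. Assumes the module-level
# imports used above (pd, np, scipy.stats.chi2) are available.
if __name__ == '__main__':
    _rng = np.random.RandomState(1)
    _x = _rng.randint(0, 20, size=500)
    _y = (_rng.rand(500) < _x / 40.0).astype(int)
    _chi_demo = pd.DataFrame({'x': _x, 'y': _y})
    print('ChiMerge cut points:',
          chiMerge(max_interval=5, feature_type=0).dsct_pipeline(_chi_demo, 'x', 'y'))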
class chi2Merge(DependencyDsct):
def __init__(self, max_interval, feature_type, sig_level=0.5, inconsistency_rate_thresh=0.05, sig_level_desc=0.1):
super(chi2Merge, self).__init__(max_interval, feature_type, sig_level)
self.inconsistency_rate_thresh = inconsistency_rate_thresh
self.sig_level_desc = sig_level_desc
def dsct(self, count, group, max_interval):
group = super(chi2Merge, self).dsct(count, group, max_interval)
sig_level = self.sig_level
# 2. 阶段1:
while self.calc_inconsistency_rate(count, group) < self.inconsistency_rate_thresh: # 不一致性检验
# 2. 计算相邻分组的卡方值
chi2_threshold = chi2.ppf(1 - sig_level, self.deg_freedom) # 卡方阈值
chi2_list = [self.calc_chi2(count, group[idx], group[idx + 1]) for idx in range(len(group) - 1)]
# 3. 合并相似分组并更新卡方值
while 1:
if min(chi2_list) >= chi2_threshold:
# print("最小卡方值%.3f大于卡方阈值%.3f,分箱合并结束!!!" % (min(chi2_list), chi2_threshold))
break
if len(group) <= max_interval:
# print("分组长度%s等于指定分组数%s" % (len(group), max_interval))
break
chi2_list, group = self.merge_adjacent_intervals(count, chi2_list, group)
# 阈值更新
sig_level = sig_level - self.sig_level_desc # 降低显著性水平,提高卡方阈值
if len(group) <= max_interval:
break
# print("Chi2分箱第一阶段完成!!!")
# 3. 阶段2:
# print("Chi2分箱第二阶段开始:")
sig_level = sig_level + self.sig_level_desc # 回到上一次的值
while True:
# 2. 计算相邻分组的卡方值
chi2_threshold = chi2.ppf(1 - sig_level, self.deg_freedom) # 卡方阈值
chi2_list = [self.calc_chi2(count, group[idx], group[idx + 1]) for idx in range(len(group) - 1)]
# 3. 合并相似分组并更新卡方值
while 1:
if min(chi2_list) >= chi2_threshold:
# print("最小卡方值%.3f大于卡方阈值%.3f,分箱合并结束!!!" % (min(chi2_list), chi2_threshold))
break
if len(group) <= max_interval:
# print("分组长度%s等于指定分组数%s" % (len(group), max_interval))
break
chi2_list, group = self.merge_adjacent_intervals(count, chi2_list, group)
# 阈值更新
in_consis_rate = self.calc_inconsistency_rate(count, group)
if in_consis_rate < self.inconsistency_rate_thresh: # 不一致性检验
sig_level = sig_level - self.sig_level_desc # 降低显著性水平,提高卡方阈值
else:
# print("分组的不一致性(%.3f)大于阈值(%.3f),无法继续合并分箱!!!" % (in_consis_rate, self.inconsistency_rate_thresh))
break
# print("Chi2分箱第二阶段完成!!!")
return group
def calc_inconsistency_rate(self, count, group):
"""
计算分组的不一致性,参考论文《Feature Selection via Discretizations》
:param count: DataFrame 待分箱变量的分布统计
:param group: list 分组信息
:return: float 该分组的不一致性
"""
inconsistency_rate = 0.0
for intv in group:
count_intv = count.loc[count.index.isin(intv)].sum(axis=0)
inconsistency_rate += count_intv.sum() - max(count_intv)
inconsistency_rate = inconsistency_rate / count.sum().sum()
return inconsistency_rate
class bestChi(chiMerge):
def dsct(self, count, group, max_interval):
group = super(bestChi, self).dsct(count, group, max_interval)
# 检查每个箱体是否都有好坏样本
group = self.guarantee_completeness(count, group)
# print("各分组好坏样本分布检验完成!")
# 单项占比检验
group = self.guarantee_proportion(count, group)
# print("各分组单项占比检验完成!")
while not self.check_posRate_monotone(count, group): # 单调性检验
# 合并分组
max_interval -= 1
group = super(bestChi, self).dsct(count, group, max_interval)
# print("单调性检验完成")
if len(group) < max_interval:
pass
# print("分箱后的箱体个数(%s)与输入的箱体个数(%s)不符,因为评分阿卡最优分箱需要优先确保单调性!" % (len(group), max_interval))
return group
def check_posRate_monotone(self, count, group):
"""
判断该分项的正样本比例是否单调
:param count:
:param group:
:return:
"""
if len(group) <= 2:
return True
count_update = [count[count.index.isin(ele)].sum(axis=0).tolist() for ele in group]
count_update = | pd.DataFrame(count_update, columns=count.columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
import json
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from scipy.stats import ks_2samp, chisquare
import plotly.graph_objs as go
import plotly.express as px
from evidently.model.widget import BaseWidgetInfo, AlertStats, AdditionalGraphInfo
from evidently.widgets.widget import Widget
red = "#ed0400"
grey = "#4d4d4d"
class ClassConfusionBasedFeatureDistrTable(Widget):
def __init__(self, title: str):
super().__init__()
self.title = title
def get_info(self) -> BaseWidgetInfo:
if self.wi:
return self.wi
raise ValueError("neither target nor prediction data provided")
def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping):
if column_mapping:
date_column = column_mapping.get('datetime')
id_column = column_mapping.get('id')
target_column = column_mapping.get('target')
prediction_column = column_mapping.get('prediction')
num_feature_names = column_mapping.get('numerical_features')
target_names = column_mapping.get('target_names')
if num_feature_names is None:
num_feature_names = []
else:
num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])]
cat_feature_names = column_mapping.get('categorical_features')
if cat_feature_names is None:
cat_feature_names = []
else:
cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])]
else:
date_column = 'datetime' if 'datetime' in reference_data.columns else None
id_column = None
target_column = 'target' if 'target' in reference_data.columns else None
prediction_column = 'prediction' if 'prediction' in reference_data.columns else None
utility_columns = [date_column, id_column, target_column, prediction_column]
target_names = None
num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns))
            cat_feature_names = list(set(reference_data.select_dtypes(['object']).columns) - set(utility_columns))
if prediction_column is not None and target_column is not None:
if production_data is not None:
additional_graphs_data = []
params_data = []
for feature_name in num_feature_names + cat_feature_names:
#add data for table in params
labels = sorted(set(reference_data[target_column]))
params_data.append(
{
"details": {
"parts": [{"title":"All", "id":"All" + "_" + str(feature_name)}] + [{"title":str(label), "id": feature_name + "_" + str(label)} for label in labels],
"insights": []
},
"f1": feature_name
}
)
#create confusion based plots
reference_data['dataset'] = 'Reference'
production_data['dataset'] = 'Current'
merged_data = | pd.concat([reference_data, production_data]) | pandas.concat |
import numpy as np
import pandas as pd
dict2={}
df = pd.read_csv('../Data/average1.csv')
dict1 = {col:df[col].tolist() for col in df.columns}
temp = []
for key in list(dict1.keys()):
if(key not in dict2.keys()):
dict2[key] = [0]*2
if(int(int(key)/100000)%10 == 8):
temp = dict1[key][34:54]+ dict1[key][59:70]
temp_mean = np.mean(temp)
temp_var = np.var(temp)
dict2[key][0] = temp_mean
dict2[key][1] = temp_var
else:
temp = dict1[key][0:2]+dict1[key][9:29]+dict1[key][34:54]+dict1[key][59:70]
temp_mean = np.mean(temp)
temp_var = np.var(temp)
dict2[key][0] = temp_mean
dict2[key][1] = temp_var
result = pd.DataFrame(columns = ['xh','mean','var'])
for key in list(dict2.keys()):
result = result.append( | pd.DataFrame({'xh':[key],'mean':[dict2[key][0]],'var':[dict2[key][1]]}) | pandas.DataFrame |
import torch
from torch import nn
import classification_ModelNet40.models as models
import torch.backends.cudnn as cudnn
from classification_ScanObjectNN.models import pointMLPElite
# from cell_dataset import PointCloudDatasetAllBoth
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from foldingnet import ReconstructionNet, ChamferLoss
from angle_loss import AngleLoss
from dataset import (
PointCloudDatasetAllBoth,
PointCloudDatasetAllBothNotSpec,
PointCloudDatasetAllBothNotSpec1024,
PointCloudDatasetAllBothNotSpecRotation,
PointCloudDatasetAllBothNotSpecRotation1024,
PointCloudDatasetAllBothNotSpec2DRotation1024,
PointCloudDatasetAllBothKLDivergranceRotation1024
)
import argparse
import os
from tearing.folding_decoder import FoldingNetBasicDecoder
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import plotly.express as px
def extract_and_save(mod, dl, save_to, dataframe):
print("Extracting All")
from tqdm import tqdm
criterion = ChamferLoss()
inputs_test = []
outputs_test = []
embeddings_test = []
labels_test = []
serial_numbers = []
mus = []
log_vars = []
mod.eval()
loss = 0
for data in tqdm(dl):
with torch.no_grad():
pts, lab, serial_num = data
labels_test.append(lab.detach().numpy())
inputs = pts.to(device)
outs, mu, log_var, embeddings, z = mod(inputs.permute(0, 2, 1))
inputs_test.append(torch.squeeze(inputs).cpu().detach().numpy())
outputs_test.append(torch.squeeze(outs).cpu().detach().numpy())
embeddings_test.append(torch.squeeze(embeddings).cpu().detach().numpy())
serial_numbers.append(serial_num)
mus.append(torch.squeeze(mu).cpu().detach().numpy())
log_vars.append(torch.squeeze(log_var).cpu().detach().numpy())
loss += criterion(inputs, outs)
print(loss / len(dl))
folding_data = pd.DataFrame(np.asarray(embeddings_test))
folding_data["serialNumber"] = np.asarray(serial_numbers)
all_data = | pd.read_csv(dataframe) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import wx
import os
import time
import sys
# In[ ]:
input_max_temp = input("Please input maximum of temperature: ")
input_min_temp = input("Please input minimum of temperature: ")
input_meandew = input("Please input mean dew point: ")
input_meanhum = input("Please input mean humidity: ")
input_pressure = input("Please input mean pressure: ")
input_meancloud = input("Please input mean cloud: ")
input_rainfall = input("Please input mean rainfall: ")
input_population = input("Please input population density: ")
input_sunshine = input("Please input mean number of sunshine hour: ")
input_wind_dir = input("Please input mean wind direction: ")
input_wind_speed = input("Please input mean wind speed: ")
input_air_quality = input("Please input mean air health quality: ")
# In[ ]:
if (True):
# !/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn import metrics
# In[2]:
dataset = pd.read_csv('data_2.csv')
# In[3]:
dataset.shape
# In[4]:
dataset.describe()
# In[5]:
dataset.isnull().any()
# In[6]:
dataset = dataset.fillna(method='ffill')
# In[7]:
dataset.plot(x='pressure', y='mean_temp', style='o')
plt.title('Pressure vs Mean Temperature')
plt.xlabel('pressure')
plt.ylabel('mean_temp')
plt.savefig("statistics/pressure.png")
plt.show()
dataset.plot(x='max_temp', y='mean_temp', style='o')
plt.title('Maximum Temperature vs Mean Temperature')
plt.xlabel('max_temp')
plt.ylabel('mean_temp')
plt.savefig("statistics/max_temp.png")
plt.show()
dataset.plot(x='min_temp', y='mean_temp', style='o')
plt.title('Minimum Temperature vs Mean Temperature')
plt.xlabel('min_temp')
plt.ylabel('mean_temp')
plt.savefig("statistics/min_temp.png")
plt.show()
dataset.plot(x='meandew', y='mean_temp', style='o')
plt.title('Mean Dew Point vs Mean Temperature')
plt.xlabel('meandew')
plt.ylabel('mean_temp')
plt.savefig("statistics/meandew.png")
plt.show()
dataset.plot(x='meanhum', y='mean_temp', style='o')
plt.title('Mean Humidity vs Mean Temperature')
plt.xlabel('meanhum')
plt.ylabel('mean_temp')
plt.savefig("statistics/meanhum.png")
plt.show()
dataset.plot(x='meancloud', y='mean_temp', style='o')
plt.title('Mean Cloud vs Mean Temperature')
plt.xlabel('meancloud')
plt.ylabel('mean_temp')
plt.savefig("statistics/meancloud.png")
plt.show()
dataset.plot(x='rainfall', y='mean_temp', style='o')
plt.title('Rainfall vs Mean Temperature')
plt.xlabel('rainfall')
plt.ylabel('mean_temp')
plt.savefig("statistics/rainfall.png")
plt.show()
dataset.plot(x='population', y='mean_temp', style='o')
plt.title('Population vs Mean Temperature')
plt.xlabel('Population')
plt.ylabel('mean_temp')
plt.savefig("statistics/population.png")
plt.show()
dataset.plot(x='sunshine_hour', y='mean_temp', style='o')
plt.title('Bright Sunshine vs Mean Temperature')
plt.xlabel('sunshine_hour')
plt.ylabel('mean_temp')
plt.savefig("statistics/sunshine.png")
plt.show()
dataset.plot(x='wind_direction', y='mean_temp', style='o')
plt.title('Wind Direction vs Mean Temperature')
plt.xlabel('wind_direction')
plt.ylabel('mean_temp')
plt.savefig("statistics/wind_direction.png")
plt.show()
dataset.plot(x='wind_speed', y='mean_temp', style='o')
plt.title('Wind Speed vs Mean Temperature')
plt.xlabel('wind_speed')
plt.ylabel('mean_temp')
plt.savefig("statistics/wind_speed.png")
plt.show()
dataset.plot(x='air_health_quality', y='mean_temp', style='o')
plt.title('Air Health Quality vs Mean Temperature')
plt.xlabel('air_health_quality')
plt.ylabel('mean_temp')
plt.savefig("statistics/air_quality.png")
plt.show()
# In[8]:
X = dataset[['pressure', 'max_temp', 'min_temp', 'meandew', 'meanhum', 'meancloud', 'rainfall', 'population',
'sunshine_hour', 'wind_direction', 'wind_speed', 'air_health_quality']]
y = dataset['mean_temp']
# In[9]:
plt.figure(figsize=(15, 10))
plt.tight_layout()
seabornInstance.distplot(dataset['mean_temp'])
# In[10]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# In[11]:
print("Linear Regression Prediction: ")
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# In[12]:
coeff_df = pd.DataFrame(regressor.coef_, X.columns, columns=['Coefficient'])
coeff_df.sort_values(by='Coefficient', ascending=False)
# In[13]:
pos_coeffs_df = coeff_df[(coeff_df['Coefficient'] >= 0)].sort_values(by='Coefficient', ascending=False)
# pos_coeffs_df.sort_values(by='Estimated_Coefficients', ascending=False)
pos_coeffs_df
# In[14]:
pos_coeffs_df = coeff_df[(coeff_df['Coefficient'] < 0)].sort_values(by='Coefficient', ascending=True)
# pos_coeffs_df.sort_values(by='Estimated_Coefficients', ascending=False)
pos_coeffs_df
# In[15]:
y_pred = regressor.predict(X_test)
# In[16]:
import seaborn as sns
    g = sns.regplot(x=y_pred, y=y_test, fit_reg=True)
g.set(xlabel='Predicted Mean Temperature', ylabel='Actual Mean Temperature', title='Model Predictions')
plt.title('Regression Plot for Actual vs Predicted Values')
# In[17]:
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1
# In[18]:
df1.plot(kind='bar', figsize=(10, 8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.savefig("statistics/linear_regression_comparison.png")
plt.show()
# In[19]:
# R2 for train and test data
R2_reg_train = regressor.score(X_train, y_train)
R2_reg_test = regressor.score(X_test, y_test)
print('R squared for train data is: %.3f' % (R2_reg_train))
print('R squared for test data is: %.3f' % (R2_reg_test))
# In[20]:
from math import sqrt
RMSE_reg_train = sqrt(np.mean((y_train - regressor.predict(X_train)) ** 2))
RMSE_reg_test = sqrt(np.mean((y_test - regressor.predict(X_test)) ** 2))
print('Root mean squared error for train data is: %.3f' % (RMSE_reg_train))
print('Root mean sqaured error for test data is: %.3f' % (RMSE_reg_test))
# In[21]:
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# In[27]:
# input_pressure = 1000
# input_max_temp = 30
# input_min_temp = 25
# input_meandew = 25
# input_meanhum = 80
estimated_temp = regressor.predict([[float(input_pressure),float(input_max_temp),float(input_min_temp),float(input_meandew),float(input_meanhum),float(input_meancloud),float(input_rainfall),int(input_population),float(input_sunshine),float(input_wind_dir),float(input_wind_speed),float(input_air_quality)]])
print ("The expected mean of temperature is", estimated_temp)
print(" ")
print("K-Nearest Neighbors Prediction: ")
knn = KNeighborsRegressor(n_neighbors=3)
knn.fit(X_train, y_train)
# In[12]:
pred_knn = knn.predict(X_test)
pred_knn
y_pred = knn.predict(X_test)
# In[16]:
import seaborn as sns
    g = sns.regplot(x=y_pred, y=y_test, fit_reg=True)
g.set(xlabel='Predicted Mean Temperature', ylabel='Actual Mean Temperature', title='Model Predictions')
plt.title('Regression Plot for Actual vs Predicted Values')
# In[17]:
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1
# In[18]:
df1.plot(kind='bar', figsize=(10, 8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.savefig("statistics/KNN_comparison.png")
plt.show()
# In[19]:
# R2 for train and test data
# R2 for train and test data
R2_reg_train = knn.score(X_train, y_train)
R2_reg_test = knn.score(X_test, y_test)
print('R squared for train data is: %.3f' % (R2_reg_train))
print('R squared for test data is: %.3f' % (R2_reg_test))
# In[20]:
from math import sqrt
RMSE_reg_train = sqrt(np.mean((y_train - knn.predict(X_train)) ** 2))
RMSE_reg_test = sqrt(np.mean((y_test - knn.predict(X_test)) ** 2))
print('Root mean squared error for train data is: %.3f' % (RMSE_reg_train))
print('Root mean sqaured error for test data is: %.3f' % (RMSE_reg_test))
# In[21]:
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# In[27]:
# input_pressure = 1000
# input_max_temp = 30
# input_min_temp = 25
# input_meandew = 25
# input_meanhum = 80
estimated_temp = knn.predict([[float(input_pressure),float(input_max_temp),float(input_min_temp),float(input_meandew),float(input_meanhum),float(input_meancloud),float(input_rainfall),int(input_population),float(input_sunshine),float(input_wind_dir),float(input_wind_speed),float(input_air_quality)]])
print ("The expected mean of temperature is", estimated_temp)
# In[ ]:
print(" ")
print("Random Forest Regression Prediction: ")
rf = RandomForestRegressor(random_state=5, n_estimators=20)
rf.fit(X_train, y_train)
# In[12]:
pred_rf = rf.predict(X_test)
pred_rf
y_pred = rf.predict(X_test)
# In[16]:
import seaborn as sns
    g = sns.regplot(x=y_pred, y=y_test, fit_reg=True)
g.set(xlabel='Predicted Mean Temperature', ylabel='Actual Mean Temperature', title='Model Predictions')
plt.title('Regression Plot for Actual vs Predicted Values')
# In[17]:
df = | pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime as datetime
from scipy.signal import find_peaks, peak_prominences
from scipy.interpolate import interp1d
from scipy import signal
from scipy.integrate import trapz
'''
Feature Engineering of Wearable Sensors:
Metrics computed:
Mean Heart Rate Variability
Median Heart Rate Variability
Maximum Heart Rate Variability
Minimum Heart Rate Variability
SDNN (HRV)
RMSSD (HRV)
NNx (HRV)
pNNx (HRV)
HRV Frequency Domain Metrics:
PowerVLF
PowerLF
PowerHF
PowerTotal
LF/HF
PeakVLF
PeakLF
PeakHF
FractionLF
FractionHF
EDA Peaks
Activity Bouts
Interday Summary:
Interday Mean
Interday Median
Interday Maximum
Interday Minimum
Interday Quartile 1
Interday Quartile 3
Interday Standard Deviation
Interday Coefficient of Variation
Intraday Standard Deviation (mean, median, standard deviation)
Intraday Coefficient of Variation (mean, median, standard deviation)
Intraday Mean (mean, median, standard deviation)
Daily Mean
Intraday Summary:
Intraday Mean
Intraday Median
Intraday Minimum
Intraday Maximum
Intraday Quartile 1
Intraday Quartile 3
TIR (Time in Range of default 1 SD)
TOR (Time outside Range of default 1 SD)
POR (Percent outside Range of default 1 SD)
MASE (Mean Amplitude of Sensor Excursions, default 1 SD)
Hours from Midnight (circadian rhythm feature)
Minutes from Midnight (ciracadian rhythm feature)
'''
def e4import(filepath, sensortype, Starttime='NaN', Endtime='NaN', window='5min'): #window is in seconds
"""
brings in an empatica compiled file **this is not raw empatica data**
Args:
filepath (String): path to file
sensortype (Sting): Options: 'EDA', 'HR', 'ACC', 'TEMP', 'BVP'
Starttime (String): (optional, default arg = 'NaN') format '%Y-%m-%d %H:%M:%S.%f', if you want to only look at data after a specific time
Endtime (String): (optional, default arg = 'NaN') format '%Y-%m-%d %H:%M:%S.%f', if you want to only look at data before a specific time
window (String): default '5min'; this is the window your data will be resampled on.
Returns:
(pd.DataFrame): dataframe of data with Time, Mean, Std columns
"""
if sensortype == 'ACC':
data = pd.read_csv(filepath,header=None, names = ["Time", "x", "y", "z"])
data['Var'] = np.sqrt(data['x']**2 + data['y']**2 + data['z']**2)
data = data.drop(columns=['x', 'y', 'z'])
else:
data = pd.read_csv(filepath, header=None, names=['Time', 'Var'])
data['Time'] = pd.to_datetime(data['Time'], format='%Y-%m-%d %H:%M:%S.%f')
if Starttime != 'NaN':
VarData = data.loc[data.loc[:, 'Time'] >= Starttime, :]
        if Endtime != 'NaN':
VarData = VarData.loc[VarData.loc[:, 'Time'] <= Endtime, :]
else:
VarData = data
Data = pd.DataFrame()
Data[[(sensortype + '_Mean')]] = VarData.resample(window, on='Time').mean()
Data[[(sensortype + '_Std')]] = VarData.resample(window, on='Time').std()
Data = Data.reset_index()
print((sensortype + ' Import and Resample Complete'))
return(Data)
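# --- Illustrative usage sketch (added example) ---
# The file name below is hypothetical; e4import expects a pre-compiled Empatica CSV
# with a timestamp column followed by the sensor value(s).
if __name__ == '__main__':
    import os
    _demo_path = 'EDA_compiled.csv'  # hypothetical path, replace with your own compiled file
    if os.path.exists(_demo_path):
        _eda_5min = e4import(_demo_path, 'EDA', window='5min')
        print(_eda_5min.head())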
def HRV(time, IBI, ibimultiplier = 1000):
"""
computes Heart Rate Variability metrics
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
        ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
Returns:
maxHRV (FloatType): maximum HRV
minHRV (FloatType): minimum HRV
meanHRV (FloatType): mean HRV
medianHRV(FloatType): median HRV
"""
time = time
ibi = IBI*ibimultiplier
maxHRV = round(max(ibi) * 10) / 10
minHRV = round(min(ibi) * 10) / 10
meanHRV = round(np.mean(ibi) * 10) / 10
medianHRV = round(np.median(ibi) * 10) / 10
return maxHRV, minHRV, meanHRV, medianHRV
def SDNN(time, IBI, ibimultiplier=1000):
"""
computes Heart Rate Variability metric SDNN
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
        ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
Returns:
SDNN (FloatType): standard deviation of NN intervals
"""
time = time
ibi = IBI*ibimultiplier
SDNN = round(np.sqrt(np.var(ibi, ddof=1)) * 10) / 10
return SDNN
def RMSSD(time, IBI, ibimultiplier=1000):
"""
computes Heart Rate Variability metric RMSSD
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
        ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
Returns:
RMSSD (FloatType): root mean square of successive differences
"""
time = time
ibi = IBI*ibimultiplier
differences = abs(np.diff(ibi))
rmssd = np.sqrt(np.sum(np.square(differences)) / len(differences))
return round(rmssd * 10) / 10
def NNx(time, IBI, ibimultiplier=1000, x=50):
"""
computes Heart Rate Variability metrics NNx and pNNx
Args:
time (pandas.DataFrame column or pandas series): time column
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
ibimultiplier (IntegerType): defualt = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
x (IntegerType): default = 50; set the number of times successive heartbeat intervals exceed 'x' ms
Returns:
NNx (FloatType): the number of times successive heartbeat intervals exceed x ms
pNNx (FloatType): the proportion of NNx divided by the total number of NN (R-R) intervals.
"""
time = time
ibi = IBI*ibimultiplier
differences = abs(np.diff(ibi))
n = np.sum(differences > x)
p = (n / len(differences)) * 100
return (round(n * 10) / 10), (round(p * 10) / 10)
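# --- Illustrative usage sketch (added example) ---
# Time-domain HRV metrics on synthetic inter-beat intervals given in seconds; the
# default ibimultiplier converts them to milliseconds inside each function.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _ibi = pd.Series(_rng.normal(0.8, 0.05, size=300))
    _t = pd.Series(np.arange(len(_ibi)))
    print('HRV (max, min, mean, median):', HRV(_t, _ibi))
    print('SDNN:', SDNN(_t, _ibi), 'RMSSD:', RMSSD(_t, _ibi))
    print('NN50, pNN50:', NNx(_t, _ibi, x=50))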
def FrequencyHRV(IBI, ibimultiplier=1000, fs=1):
"""
computes Heart Rate Variability frequency domain metrics
Args:
IBI (pandas.DataFrame column or pandas series): column with inter beat intervals
        ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
fs (IntegerType): Optional sampling frequency for frequency interpolation (default=1)
Returns:
(dictionary): dictionary of frequency domain HRV metrics with keys:
PowerVLF (FloatType): Power of the Very Low Frequency (VLF): 0-0.04Hz band
PowerLF (FloatType): Power of the Low Frequency (LF): 0.04-0.15Hz band
PowerHF (FloatType): Power of the High Frequency (HF): 0.15-0.4Hz band
PowerTotal (FloatType):Total power over all frequency bands
LF/HF (FloatType): Ratio of low and high power
Peak VLF (FloatType): Peak of the Very Low Frequency (VLF): 0-0.04Hz band
Peak LF (FloatType): Peak of the Low Frequency (LF): 0.04-0.15Hz band
Peak HF (FloatType): Peak of the High Frequency (HF): 0.15-0.4Hz band
FractionLF (FloatType): Fraction that is low frequency
FractionHF (FloatType): Fraction that is high frequency
"""
ibi = IBI*ibimultiplier
steps = 1 / fs
# create interpolation function based on the rr-samples.
x = np.cumsum(ibi) / 1000.0
f = interp1d(x, ibi, kind='cubic')
# sample from interpolation function
xx = np.arange(1, np.max(x), steps)
ibi_interpolated = f(xx)
fxx, pxx = signal.welch(x=ibi_interpolated, fs=fs)
'''
Segement found frequencies in the bands
- Very Low Frequency (VLF): 0-0.04Hz
- Low Frequency (LF): 0.04-0.15Hz
- High Frequency (HF): 0.15-0.4Hz
'''
cond_vlf = (fxx >= 0) & (fxx < 0.04)
cond_lf = (fxx >= 0.04) & (fxx < 0.15)
cond_hf = (fxx >= 0.15) & (fxx < 0.4)
# calculate power in each band by integrating the spectral density
vlf = trapz(pxx[cond_vlf], fxx[cond_vlf])
lf = trapz(pxx[cond_lf], fxx[cond_lf])
hf = trapz(pxx[cond_hf], fxx[cond_hf])
# sum these up to get total power
total_power = vlf + lf + hf
# find which frequency has the most power in each band
peak_vlf = fxx[cond_vlf][np.argmax(pxx[cond_vlf])]
peak_lf = fxx[cond_lf][np.argmax(pxx[cond_lf])]
peak_hf = fxx[cond_hf][np.argmax(pxx[cond_hf])]
# fraction of lf and hf
lf_nu = 100 * lf / (lf + hf)
hf_nu = 100 * hf / (lf + hf)
results = {}
results['PowerVLF'] = round(vlf, 2)
results['PowerLF'] = round(lf, 2)
results['PowerHF'] = round(hf, 2)
results['PowerTotal'] = round(total_power, 2)
results['LF/HF'] = round(lf / hf, 2)
results['PeakVLF'] = round(peak_vlf, 2)
results['PeakLF'] = round(peak_lf, 2)
results['PeakHF'] = round(peak_hf, 2)
results['FractionLF'] = round(lf_nu, 2)
results['FractionHF'] = round(hf_nu, 2)
return results
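# --- Illustrative usage sketch (added example) ---
# Frequency-domain HRV metrics on roughly eight minutes of synthetic beats; welch()
# may warn about nperseg on short recordings, which is harmless here.
if __name__ == '__main__':
    _rng = np.random.RandomState(1)
    _ibi_fd = pd.Series(_rng.normal(0.8, 0.05, size=600))
    print(FrequencyHRV(_ibi_fd, ibimultiplier=1000, fs=1))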
def PeaksEDA(eda, time):
"""
calculates peaks in the EDA signal
Args:
eda (pandas.DataFrame column or pandas series): eda column
time (pandas.DataFrame column or pandas series): time column
Returns:
countpeaks (IntegerType): the number of peaks total
peakdf (pandas.DataFrame): a pandas dataframe with time and peaks to easily integrate with your data workflow
"""
EDAy = eda.to_numpy()
EDAx = time.to_numpy()
peaks, _ = find_peaks(EDAy, height=0, distance=4, prominence=0.3)
peaks_x = []
for i in peaks:
px = time.iloc[i]
peaks_x.append(px)
peakdf = pd.DataFrame()
peakdf['Time'] = peaks_x
peakdf['Peak'] = ([1]*len(peaks_x))
countpeaks = len(peakdf)
return countpeaks, peakdf
def exercisepts(acc, hr, time): #acc and hr must be same length, acc must be magnitude
"""
calculates activity bouts using accelerometry and heart rate
Args:
acc (pandas.DataFrame column or pandas series): accelerometry column
hr (pandas.DataFrame column or pandas series): heart rate column
time (pandas.DataFrame column or pandas series): time column
Returns:
countbouts (IntegerType): the number of acitvity bouts total
returndf (pandas.DataFrame): a pandas dataframe with time and activity bouts (designated as a '1') to easily integrate with your data workflow
"""
exercisepoints = []
for z in range(len(acc)):
if acc[z] > np.mean(acc[0:z]):
if hr[z] > np.mean(hr[0:z]):
exercisepoints.append(1)
else:
exercisepoints.append(0)
else:
exercisepoints.append(0)
returndf = pd.DataFrame()
returndf['Time'] = time
returndf['Activity Bouts'] = exercisepoints
    countbouts = sum(exercisepoints)  # count only the samples flagged as activity, per the docstring
return countbouts, returndf
def interdaycv(column):
"""
computes the interday coefficient of variation on pandas dataframe Sensor column
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
Returns:
cvx (IntegerType): interday coefficient of variation
"""
cvx = (np.std(column) / (np.nanmean(column)))*100
return cvx
def interdaysd(column):
"""
computes the interday standard deviation of pandas dataframe Sensor column
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
Returns:
interdaysd (IntegerType): interday standard deviation
"""
interdaysd = np.std(column)
return interdaysd
def intradaycv(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the intradaycv, returns the mean, median, and sd of intraday cv Sensor column in pandas dataframe
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
        intradaycv_mean (IntegerType): Mean of intraday coefficient of variation
intradaycv_median (IntegerType): Median of intraday coefficient of variation
intradaycv_sd (IntegerType): SD of intraday coefficient of variation
Requires:
interdaycv() function
"""
intradaycv = []
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaycv.append(interdaycv(df[df['Day']==i]['Column']))
intradaycv_mean = np.mean(intradaycv)
intradaycv_median = np.median(intradaycv)
intradaycv_sd = np.std(intradaycv)
return intradaycv_mean, intradaycv_median, intradaycv_sd
def intradaysd(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the intradaysd, returns the mean, median, and sd of intraday sd Sensor column in pandas dataframe
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
        intradaysd_mean (IntegerType): Mean of intraday standard deviation
intradaysd_median (IntegerType): Median of intraday standard deviation
intradaysd_sd (IntegerType): SD of intraday standard deviation
"""
intradaysd =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaysd.append(np.std(df[df['Day']==i]['Column']))
intradaysd_mean = np.mean(intradaysd)
intradaysd_median = np.median(intradaysd)
intradaysd_sd = np.std(intradaysd)
return intradaysd_mean, intradaysd_median, intradaysd_sd
def intradaymean(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the intradaymean, returns the mean, median, and sd of the intraday mean of the Sensor data
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
        intradaymean_mean (IntegerType): Mean of the intraday means
        intradaymean_median (IntegerType): Median of the intraday means
        intradaymean_sd (IntegerType): SD of the intraday means
"""
intradaymean =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaymean.append(np.nanmean(df[df['Day']==i]['Column']))
intradaymean_mean = np.mean(intradaymean)
intradaymean_median = np.median(intradaymean)
intradaymean_sd = np.std(intradaymean)
return intradaymean_mean, intradaymean_median, intradaymean_sd
def dailymean(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the mean of each day
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
pandas.DataFrame with days and means as columns
"""
intradaymean =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaymean.append(np.nanmean(df[df['Day']==i]['Column']))
dailymeandf = pd.DataFrame()
dailymeandf['Day'] = pd.unique(df['Day'])
dailymeandf['Mean'] = intradaymean
return dailymeandf
def dailysummary(column, time, timeformat='%Y-%m-%d %H:%M:%S.%f'):
"""
computes the summary of each day (mean, median, std, max, min, Q1G, Q3G)
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
time (pandas.DataFrame): time column
timeformat (String): default = '%Y-%m-%d %H:%M:%S.%f'; format of timestamp in time column
Returns:
pandas.DataFrame with days and summary metrics as columns
"""
intradaymean =[]
intradaymedian =[]
intradaysd =[]
intradaymin =[]
intradaymax =[]
intradayQ1 =[]
intradayQ3 =[]
df = pd.DataFrame()
df['Column'] = column
df['Time'] = pd.to_datetime(time, format=timeformat)
df['Day'] = df['Time'].dt.date
for i in pd.unique(df['Day']):
intradaymean.append(np.nanmean(df[df['Day']==i]['Column']))
intradaymedian.append(np.nanmedian(df[df['Day']==i]['Column']))
intradaysd.append(np.std(df[df['Day']==i]['Column']))
intradaymin.append(np.nanmin(df[df['Day']==i]['Column']))
intradaymax.append(np.nanmax(df[df['Day']==i]['Column']))
intradayQ1.append(np.nanpercentile(df[df['Day']==i]['Column'], 25))
intradayQ3.append(np.nanpercentile(df[df['Day']==i]['Column'], 75))
dailysumdf = pd.DataFrame()
dailysumdf['Day'] = pd.unique(df['Day'])
dailysumdf['Mean'] = intradaymean
dailysumdf['Median'] = intradaymedian
dailysumdf['Standard Deviation'] = intradaysd
dailysumdf['Minimum'] = intradaymin
dailysumdf['Maximum'] = intradaymax
dailysumdf['Quartile 1'] = intradayQ1
dailysumdf['Quartile 3'] = intradayQ3
return dailysumdf
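# --- Illustrative usage sketch (added example) ---
# Daily summary over two days of synthetic half-hourly samples; timestamps are
# formatted as strings to match the default timeformat argument.
if __name__ == '__main__':
    _rng = np.random.RandomState(2)
    _times = pd.Series(pd.date_range('2020-01-01', periods=96, freq='30min')
                       .strftime('%Y-%m-%d %H:%M:%S.%f'))
    _vals = pd.Series(_rng.normal(70, 5, size=96))
    print(dailysummary(_vals, _times))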
def interdaysummary(column, dataframe=True):
"""
computes interday mean, median, minimum and maximum, and first and third quartile over a column
Args:
column (pandas.DataFrame column or pandas series): column that you want to calculate over
dataframe (True/False): default=True; whether you want a pandas DataFrame as an output or each of the summary metrics as IntegerTypes
Returns:
pandas.DataFrame with columns: Mean, Median, Standard Deviation, Minimum, Maximum, Quartile 1, Quartile 3
or
interdaymean (FloatType): mean
interdaymedian (FloatType): median
interdaysd (FloatType) : standard deviation
interdaymin (FloatType): minimum
interdaymax (FloatType): maximum
interdayQ1 (FloatType): first quartile
interdayQ3 (FloatType): third quartile
"""
interdaymean = np.nanmean(column)
interdaymedian = np.nanmedian(column)
interdaysd = np.std(column)
interdaymin = np.nanmin(column)
interdaymax = np.nanmax(column)
interdayQ1 = np.nanpercentile(column, 25)
interdayQ3 = np.nanpercentile(column, 75)
interdaysum = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 3 00:44:36 2022
@author: filot
Create timeseries
"""
import pandas as pd
import glob
# Standard Library imports
import argparse
import gzip
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import os
import pandas as pd
import sys
import xarray as xr
# Third party imports
from collections import OrderedDict
from datetime import datetime
from sklearn import linear_model
from scipy.stats import spearmanr
from scipy.stats import pearsonr
# Semi-local imports
import name_qch4_couple.io
import name_qch4_couple.name
import name_qch4_couple.plot_h2
# Local imports
import routines
import chem_ch4_validation
import chem_co
#os.chdir('C:/Users/filot/Desktop/YEAR_4/Dissertation/Ed_new_script/scripts')
date = "2018-01"
# Dates
dates_tHour = pd.date_range(
pd.to_datetime(date),
pd.to_datetime(date) + | pd.DateOffset(months=12) | pandas.DateOffset |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
## Converts h5 input to short format
## By: <NAME>
## Bring in system mod
import sys
# In[ ]:
## Set user defined variables
## Check we have three inputs!
assert (len(sys.argv) >= 4), "ERROR: This script must include:\n(1) The full path to a ginteractions (tsv) file (which is assumed to be an h5 matrix converted via HicExplorer).\n(2) A genome size (tsv) file with chromosome and size columns.\n(3) A valid output path to save the hic short file."
## Gather data inputs
datapath = str(sys.argv[1])
sizepath = str(sys.argv[2])
savepath = str(sys.argv[3])
## Set verbosity if passed
if (len(sys.argv) == 5):
if str(sys.argv[4]) == 'true':
verbose = True
else:
verbose = False
else:
verbose = False
# ## Set user defined variables
# ## Set input path
# datapath = '/Users/croth/HIC/MRC5/2401.006.h5.toremove.ginteractions.tsv'
#
# ## Set output path
# savepath = '/Users/croth/HIC/MRC5/2401.006.h5.toremove.short'
#
# ## Set path to size file
# sizepath = '/Users/croth/REFERENCES/ENCODE/genome.size.txt'
# #sizepath = '/Users/croth/REFERENCES/ENCODE/test1.size.txt'
# #sizepath = '/Users/croth/REFERENCES/ENCODE/test2.size.txt'
#
# ## Set verbose
# verbose = False
# In[ ]:
## Set other needed variables
## Set verbosity
#verbose = True
## Set input sep
mysep = '\t'
## Set output output sep
outsep = ' '
## Set column names
colname = ['Chrom1','Left1','Right1','Chrom2','Left2','Right2','Quality']
# In[ ]:
## Bring in needed mods
import pandas as pd, numpy as np
## Write a ftn to check index between two dataframes
def checkix(x,y):
x = np.array(sorted(x.index.values))
y = np.array(sorted(y.index.values))
assert (np.sum(x-y) == 0), "ERROR: The indices of the dataframes to not match!"
# In[ ]:
## Load in genomesize and contact data
## Log if verbose
if verbose:
print("Loading genome size and contact (h5) files.")
## Load genome size file
genomesize = | pd.read_csv(sizepath,sep=mysep,names=['Chrom','Size']) | pandas.read_csv |
import numpy as np
import cv2
import subprocess
import argparse
import os
import sys
from datetime import datetime
import time
from math import sqrt, pi, cos, sin
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
from train import process_image, model
oshapeX = 640
oshapeY = 240
NUM_CLASSES = 3
shapeX = 320
shapeY = 120
cshapeY = 80
parser = argparse.ArgumentParser(description='Recorder')
parser.add_argument(
'img_dir',
type=str,
default='ts_0',
help='Name of the training set folder. Default: ts_0'
)
parser.add_argument(
'--fps',
type=int,
default=30,
help='FPS (Frames per second) setting for the video.'
)
parser.add_argument(
'-out_dir',
type=str,
default='',
help='Name of the output folder in the out_dir. Default: None'
)
parser.add_argument(
'-model',
type=str,
default='',
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'-correct',
type=int,
default=0,
help='Correct recorded csv data 0 - off/ 1 - on.'
)
parser.add_argument(
'-detect',
type=int,
default=0,
help='Detection of data close to misclassification 0 - off/ 1 - on.'
)
parser.add_argument(
'-class_err',
type=float,
default=1.0,
help='Detection of misclassifications exceeding error. Default: 1.0.'
)
parser.add_argument(
'-miss_err',
type=float,
default=.66,
help='Threshold value for detection of disperse predictions. Default: 0.66.'
)
args = parser.parse_args()
# Data folder name annd path
full_path = os.path.dirname(os.path.realpath(sys.argv[0])) + "/"
img_path = "./data_sets/" + args.img_dir + "/" + "data/"
if args.out_dir == "":
saving = False
print("Warning: not recording this run")
else:
saving = True
if not os.path.exists("./out_dir/"):
os.mkdir("./out_dir")
out_dir = "./out_dir/" + args.out_dir + "/"
if not os.path.exists(out_dir):
os.mkdir(out_dir)
data_dir = "./model_data/"
if not os.path.exists(data_dir):
os.mkdir(data_dir)
# Model params
predict = False
detect = False
class_err = args.class_err
miss_err = args.miss_err
if args.model:
shape = (shapeY, shapeX, 3)
model = model(True, shape, args.model)
predict = True
if args.detect:
detect = True
err = 0
correct = args.correct
# Init steering params
st_min_val = 0.48
st_mid_val = 1.51
st_max_val = 2.90
st_curr = 0
# Internal params
tries = 0
t0 = 0.0
r = float(oshapeY) / 4.0
r_sq = r * r
d = 2 * r
max_angle = pi / 4.0
# Read csv from file into dataframe
# if predict == False:
# df = pd.read_csv("./data_sets/" + args.img_dir + "/" +\
# "out.csv", names=['img_name', 'command'])
# ind = 0
# else:
df = | pd.read_csv(data_dir + args.img_dir +\
'_log.csv' , names=['img_name', 'command']) | pandas.read_csv |
def removeMissing(filename):
"""Takes a file that contains missing scans and removes those rows, while providing the subject name and reason for removal."""
import pandas as pd
import math
loaded_file = pd.read_csv(filename)
cleaned_list = []
missing_counter = 0
for row in loaded_file.index:
if math.isnan(loaded_file.iloc[row, 3]):
print("Dropping subject scan " + loaded_file.iloc[row, 0] + " because of " + loaded_file.iloc[row,1])
missing_counter = missing_counter + 1
else:
cleaned_list.append(loaded_file.iloc[row])
print("There were " + str(missing_counter) + " scans with missing data dropped.")
cleaned_df = | pd.DataFrame(cleaned_list) | pandas.DataFrame |
#%%
import time
from pathlib import Path
import colorcet as cc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from graspologic.plot import pairplot
from sparse_decomposition import SparseComponentAnalysis
from sparse_decomposition.utils import calculate_explained_variance_ratio
from sparse_new_basis.data import load_scRNAseq
from sparse_new_basis.plot import savefig, set_theme
set_theme()
fig_dir = Path("sparse_new_basis/results/gene_sca_1.1")
def stashfig(name, *args, **kwargs):
savefig(fig_dir, name, *args, **kwargs)
#%%
sequencing_df, annotation_df = load_scRNAseq(fillna=True)
#%% throw out some genes with low variance
X = sequencing_df.values.copy()
var_thresh = VarianceThreshold(threshold=0.01)
X = var_thresh.fit_transform(X)
gene_index = sequencing_df.columns
gene_index = gene_index[var_thresh.get_support()]
#%%
# TODO plot the distribution of frequencies by cell type
#%%
neuron_index = sequencing_df.index
y = sequencing_df.index.get_level_values(level="Neuron_type").values
# stratify=y will try to set the distribution of class labels the same for train/test
X_train, X_test, index_train, index_test = train_test_split(
X, neuron_index, stratify=y, train_size=2 ** 14
)
#%%
n_components = 20
max_iter = 10
# gamma = 20
gamma = 100
sca_params = f"-n_components={n_components}-max_iter={max_iter}-gamma={gamma}"
pca_params = f"-n_components={n_components}"
#%% center and scale training data
currtime = time.time()
scaler = StandardScaler(with_mean=True, with_std=True, copy=False)
X_train = scaler.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to scale and center data.")
#%%
currtime = time.time()
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to fit PCA model.")
#%%
currtime = time.time()
sca = SparseComponentAnalysis(n_components=n_components, max_iter=max_iter, gamma=gamma)
X_sca = sca.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to fit SCA model.")
#%%
max_iter = 5
gammas = [n_components, 100, 500, np.sqrt(X_train.shape[1]) * n_components, np.inf]
models_by_gamma = {}
Xs_by_gamma = {}
for i, gamma in enumerate(gammas):
print(f"Gamma = {gamma}...")
if gamma == np.inf:
_max_iter = 0
else:
_max_iter = max_iter
currtime = time.time()
sca = SparseComponentAnalysis(
n_components=n_components, max_iter=_max_iter, gamma=gamma
)
X_sca = sca.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed.")
models_by_gamma[gamma] = sca
Xs_by_gamma[gamma] = X_sca
print()
#%%
rows = []
for gamma, model in models_by_gamma.items():
explained_variance_ratio = model.explained_variance_ratio_
for k, ev in enumerate(explained_variance_ratio):
n_nonzero = np.count_nonzero(model.components_[: k + 1])
rows.append(
{
"gamma": gamma,
"explained_variance": ev,
"n_components": k + 1,
"n_nonzero": n_nonzero,
}
)
scree_df = pd.DataFrame(rows)
#%% screeplots, basically
# n_models = len(models_by_gamma)
# scree_df = pd.DataFrame(index=range(n_components))
# scree_df["n_components"] = np.tile(np.arange(1, n_components + 1), 2)
# scree_df["explained_variance"] = np.concatenate(
# (np.cumsum(pca.explained_variance_ratio_), sca.explained_variance_ratio_)
# )
# scree_df["method"] = n_components * ["PCA"] + n_components * ["SCA"]
palette = dict(zip(gammas, sns.color_palette("deep", 10)))
gammas[:-1]
#%%
blue_shades = sns.color_palette("Blues", n_colors=len(gammas))[1:]
palette = dict(zip(gammas[:-1], blue_shades))
red_shades = sns.color_palette("Reds", n_colors=len(gammas))[1:]
palette[np.inf] = red_shades[-1]
# sns.color_palette("ch:start=.2,rot=-.3", as_cmap=False, n_colors=len(gammas) - 1)
#%%
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(
data=scree_df,
x="n_components",
y="explained_variance",
hue="gamma",
ax=ax,
marker="o",
palette=palette,
)
ax.get_legend().remove()
ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Gamma")
# ax.legend().set_title("Gamma")
ax.set(ylabel="Cumulative explained variance", xlabel="# of PCs")
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
ax.xaxis.set_major_locator(plt.IndexLocator(base=5, offset=-1))
stashfig("screeplot")
#%%
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
sns.lineplot(
data=scree_df,
x="n_nonzero",
y="explained_variance",
hue="gamma",
ax=ax,
marker="o",
palette=palette,
)
ax.get_legend().remove()
ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Gamma")
# ax.legend().set_title("Gamma")
ax.set(ylabel="Cumulative explained variance", xlabel="# nonzero elements")
plt.xscale("log")
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
# ax.xaxis.set_major_locator(plt.IndexLocator(base=5, offset=-1))
stashfig("screeplot-by-params")
#%%
neuron_types = index_train.get_level_values("Neuron_type").values
neuron_type_palette = dict(zip(np.unique(neuron_types), cc.glasbey_light))
n_show = 5
def make_plot_df(X, labels=None):
columns = [f"Dimension {i+1}" for i in range(X.shape[1])]
plot_df = | pd.DataFrame(data=X, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import tkinter as tk
from tkinter import filedialog
Response=pd.read_json("1.json",encoding="UTF-8")
carList=Response["response"]["classifieds"]
df=pd.DataFrame(carList)
for each in range(2,295):
try:
Response=pd.read_json(str(each)+".json",encoding="UTF-8")
carList=Response["response"]["classifieds"]
df2= | pd.DataFrame(carList) | pandas.DataFrame |
import os
import yaml
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import h5py
def predict_example_hdf5_file(cfgs):
dup_num = 14
csv_data = pd.read_csv(cfgs['Testing']['pred_csv'], dtype = {'key': str})
csv_output_data = | pd.DataFrame(columns=['key', 'pred_idx', 'prob_idx']) | pandas.DataFrame |
import argparse
import datetime as dt
from glob import glob
from math import ceil
import json
import os.path
from pathlib import Path
import enaml
with enaml.imports():
from enaml.stdlib.message_box import information
from enaml.qt.qt_application import QtApplication
import matplotlib as mp
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
from tqdm import tqdm
from psi.data.io import abr
from psi import get_config
COLUMNS = ['frequency', 'level', 'polarity']
def get_file_template(filename, offset, duration, filter_settings, n_epochs,
                      prefix='ABR', simple_filename=True,
                      include_filename=True, suffix=None):
if prefix:
prefix += ' '
if simple_filename:
return f'{prefix}{{}}'
base_string = f'{prefix}{offset*1e3:.1f}ms to {(offset+duration)*1e3:.1f}ms'
if n_epochs:
base_string = f'{base_string} {n_epochs} averages'
fh = abr.load(filename)
if filter_settings == 'saved':
settings = _get_filter(fh)
if not settings['digital_filter']:
filter_string = 'no filter'
else:
lb = settings['lb']
ub = settings['ub']
filter_string = f'{lb:.0f}Hz to {ub:.0f}Hz filter'
elif filter_settings is None:
filter_string = 'no filter'
else:
lb = filter_settings['lb']
ub = filter_settings['ub']
filter_string = f'{lb:.0f}Hz to {ub:.0f}Hz filter'
order = filter_settings['order']
if order != 1:
filter_string = f'{order:.0f} order {filter_string}'
file_string = f'{base_string} with {filter_string}'
if suffix is not None:
file_string = f'{file_string} {suffix}'
return f'{file_string} {{}}'
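# Illustrative output of the template above (hypothetical values, not taken
# from a real recording): with prefix='ABR', offset=-0.001, duration=0.01,
# n_epochs=512, a 300-3000 Hz filter and simple_filename=False, the function
# returns
#   'ABR -1.0ms to 9.0ms 512 averages with 300Hz to 3000Hz filter {}'
# and the '{}' placeholder is later filled with suffixes such as
# 'average waveforms.csv' or 'processing settings.json'.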
def _get_filter(fh):
if not isinstance(fh, (abr.ABRFile, abr.ABRSupersetFile)):
fh = abr.load(fh)
return {
'digital_filter': fh.get_setting_default('digital_filter', True),
'lb': fh.get_setting_default('digital_highpass', 300),
'ub': fh.get_setting_default('digital_lowpass', 3000),
# Filter order is not currently an option in the psiexperiment ABR
# program so it defaults to 1.
'order': 1,
}
def _get_epochs(fh, offset, duration, filter_settings, reject_ratio=None,
downsample=None, cb=None):
    # We need to handle rejection in this code so that we can obtain the
    # information needed for generating the CSV files. Set reject_threshold to
    # np.inf so that the loader itself rejects nothing.
kwargs = {'offset': offset, 'duration': duration, 'columns': COLUMNS,
'reject_threshold': np.inf, 'downsample': downsample, 'cb': cb,
'bypass_cache': False}
if filter_settings is None:
return fh.get_epochs(**kwargs)
if filter_settings == 'saved':
settings = _get_filter(fh)
if not settings['digital_filter']:
return fh.get_epochs(**kwargs)
lb = settings['lb']
ub = settings['ub']
order = settings['order']
kwargs.update({'filter_lb': lb, 'filter_ub': ub, 'filter_order': order})
return fh.get_epochs_filtered(**kwargs)
lb = filter_settings['lb']
ub = filter_settings['ub']
order = filter_settings['order']
kwargs.update({'filter_lb': lb, 'filter_ub': ub, 'filter_order': order})
return fh.get_epochs_filtered(**kwargs)
def is_processed(filename, offset, duration, filter_settings, n_epochs=None,
simple_filename=True, export_single_trial=False,
processed_directory=None, directory_depth=None):
file_template = get_file_template(filename, offset, duration,
filter_settings, n_epochs,
simple_filename=simple_filename,
include_filename=False)
file_template = str(filename / file_template)
suffixes = ['waveforms.pdf', 'average waveforms.csv',
'processing settings.json', 'experiment settings.json']
if export_single_trial:
suffixes.append('individual waveforms.csv')
for suffix in suffixes:
filename = Path(file_template.format(suffix))
if not filename.exists():
return False
return True
def add_trial(epochs):
'''
This adds trial number on a per-stim-level/frequency basis
'''
def number_trials(subset):
subset = subset.sort_index(level='t0')
idx = subset.index.to_frame()
i = len(idx.columns) - 1
idx.insert(i, 'trial', np.arange(len(idx)))
subset.index = pd.MultiIndex.from_frame(idx)
return subset
levels = list(epochs.index.names[:-1])
if 'polarity' in levels:
levels.remove('polarity')
return epochs.groupby(levels, group_keys=False).apply(number_trials)
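# Minimal sketch (hypothetical values) of the effect of `add_trial`: epochs are
# grouped by frequency and level (polarity is deliberately ignored), ordered by
# acquisition time, and a zero-based 'trial' level is inserted just before 't0'.
#
#   idx = pd.MultiIndex.from_tuples(
#       [(1e3, 80, 1, 0.00), (1e3, 80, -1, 0.05), (2e3, 80, 1, 0.10)],
#       names=['frequency', 'level', 'polarity', 't0'])
#   epochs = pd.DataFrame(np.zeros((3, 2)), index=idx)
#   add_trial(epochs).index.names
#   # -> ['frequency', 'level', 'polarity', 'trial', 't0']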
def process_folder(folder, filter_settings=None):
if abr.is_abr_experiment(folder):
files = [folder]
else:
files = list(Path(folder).glob('*abr_io*'))
process_files(files, filter_settings=filter_settings, cb='tqdm')
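# Example (hypothetical path): batch-process every '*abr_io' recording found in
# a directory, reusing the filter settings saved at acquisition time:
#   process_folder('c:/data/ABR', filter_settings='saved')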
def process_files(filenames, offset=-0.001, duration=0.01,
filter_settings=None, cb='tqdm'):
success = []
error = []
for filename in tqdm(filenames):
        try:
            process_file(filename, offset=offset, duration=duration,
                         filter_settings=filter_settings, cb=cb)
            success.append(filename)
        except Exception as e:
            # Record the failure and continue with the remaining files so the
            # summary printed below reflects every file that was attempted.
            error.append((filename, e))
print(f'Successfully processed {len(success)} files with {len(error)} errors')
def process_file(filename, offset=-1e-3, duration=10e-3,
filter_settings='saved', n_epochs='auto',
simple_filename=True, export_single_trial=False, cb=None,
file_template=None, target_fs=12.5e3, analysis_window=None,
latency_correction=0, gain_correction=1, debug_mode=False,
plot_waveforms_cb=None):
'''
Extract ABR epochs, filter and save result to CSV files
Parameters
----------
filename : path
Path to ABR experiment. If it's a set of ABR experiments, epochs across
all experiments will be combined for the analysis.
offset : sec
The start of the epoch to extract, in seconds, relative to tone pip
onset. Negative values can be used to extract a prestimulus baseline.
duration: sec
The duration of the epoch to extract, in seconds, relative to the
offset. If offset is set to -0.001 sec and duration is set to 0.01 sec,
then the epoch will be extracted from -0.001 to 0.009 sec re tone pip
onset.
filter_settings : {None, 'saved', dict}
If None, no additional filtering is done. If 'saved', uses the digital
filter settings that were saved in the ABR file. If a dictionary, must
contain 'lb' (the lower bound of the passband in Hz) and 'ub' (the
upper bound of the passband in Hz).
n_epochs : {None, 'auto', int, dict}
If None, all epochs will be used. If 'auto', use the value defined at
acquisition time. If integer, will limit the number of epochs per
frequency and level to this number.
simple_filename : bool
If True, do not embed settings used for processing data in filename.
export_single_trial : bool
If True, export single trials.
cb : {None, 'tqdm', callable}
If a callable, takes one value (the estimate of percent done as a
fraction). If 'tqdm', progress will be printed to the console.
file_template : {None, str}
Template that will be used to determine names (and path) of processed
files.
target_fs : float
Closest sampling rate to target
    analysis_window : Ignored
        This is ignored for now. It is accepted primarily so that jobs queued
        by the GUI, which include an analysis window, can be passed through.
latency_correction : float
Correction, in seconds, to apply to timing of ABR. This allows us to
retroactively correct for any ADC or DAC delays that were present in
the acquisition system.
gain_correction : float
Correction to apply to the scaling of the waveform. This allows us to
retroactively correct for differences in gain that were present in the
acquisition system.
debug_mode : bool
This is reserved for internal use only. This mode will load the epochs
and return them without saving to disk.
plot_waveforms_cb : {Callable, None}
Callback that takes three arguments. Epoch mean dataframe, path to file
to save figures in, and name of file.
'''
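    # Example call (hypothetical path/values): extract -1 to 9 ms epochs with
    # the saved digital filter, cap averages at the value stored in the file,
    # and write the CSV/PDF outputs alongside the raw data:
    #   process_file('c:/data/mouse1 abr_io', offset=-1e-3, duration=10e-3,
    #                filter_settings='saved', n_epochs='auto', cb='tqdm')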
settings = locals()
    # Define the callback as a no-op if not provided, or set up a tqdm
    # progress bar if requested.
if cb is None:
cb = lambda x: x
elif cb == 'tqdm':
pbar = tqdm(total=100, bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
def cb(frac):
nonlocal pbar
frac *= 100
pbar.update(frac - pbar.n)
if frac == 100:
pbar.close()
filename = Path(filename)
# Cleanup settings so that it is JSON-serializable
settings.pop('cb')
settings['filename'] = str(settings['filename'])
settings['creation_time'] = dt.datetime.now().isoformat()
fh = abr.load(filename)
if len(fh.erp_metadata) == 0:
raise IOError('No data in file')
# This is a hack to ensure that native Python types are returned instead of
# Numpy ones. Newer versions of Pandas have fixed this issue, though.
md = fh.erp_metadata.iloc[:1].to_dict('records')[0]
for column in COLUMNS:
del md[column]
del md['t0']
downsample = int(ceil(fh.eeg.fs / target_fs))
settings['downsample'] = downsample
settings['actual_fs'] = fh.eeg.fs / downsample
if n_epochs is not None:
if n_epochs == 'auto':
n_epochs = fh.get_setting('averages')
cb(0)
if file_template is None:
file_template = get_file_template(
filename, offset, duration, filter_settings, n_epochs,
simple_filename=simple_filename, include_filename=False)
file_template = str(filename / file_template)
raw_epoch_file = Path(file_template.format('individual waveforms.csv'))
mean_epoch_file = Path(file_template.format('average waveforms.csv'))
settings_file = Path(file_template.format('processing settings.json'))
experiment_file = Path(file_template.format('experiment settings.json'))
figure_file = Path(file_template.format('waveforms.pdf'))
# Load the epochs. The callbacks for loading the epochs return a value in
# the range 0 ... 1. Since this only represents "half" the total work we
# need to do, rescale to the range 0 ... 0.5.
def cb_rescale(frac):
nonlocal cb
cb(frac * 0.5)
epochs = _get_epochs(fh, offset + latency_correction, duration,
filter_settings, cb=cb_rescale, downsample=downsample)
if gain_correction != 1:
epochs = epochs * gain_correction
if latency_correction != 0:
new_idx = [(*r[:-1], r[-1] - latency_correction) for r in epochs.index]
new_idx = pd.MultiIndex.from_tuples(new_idx, names=epochs.index.names)
new_col = epochs.columns - latency_correction
epochs = | pd.DataFrame(epochs.values, index=new_idx, columns=new_col) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 14:42:41 2017
@author: <NAME>
"""
# create dummy variables for categorical variables with two categories, e.g. 'sex', which is either male or female
import pandas as pd
path = 'C:/Users/<NAME>/Documents/Shreeya_Programming/Predictive/Chapter 2'
filename1 = 'titanic3.csv'
fullpath = path + '/' + filename1
data = | pd.read_csv(fullpath) | pandas.read_csv |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # This case checks that the SOFR leg's maturity is matched to the LIBOR leg
    # and that the legs are flipped in order to resolve the right asset.
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
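    # Top-N realized correlation: argument validation first, then the result is
    # compared against the CSV fixtures under resources/.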
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
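    # Average implied volatility: the plain index query returns the mocked series; the top-N
    # path combines the per-constituent implied-vol frames built below, and invalid
    # argument combinations raise.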
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
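    # Average realized volatility for the index, then the top-N constituent path built from
    # the mocked spot frames; bad arguments and empty position data raise.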
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
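    # Basis swap spread: unsupported currency (NOK), real-time requests and malformed
    # tenor/benchmark inputs raise; valid USD queries return the mocked 'basisSwapRate' series.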
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
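    # Swap rate: tenor/benchmark validation for USD, then successful USD fed_funds and
    # EUR estr lookups against the mocked market data.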
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
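    # Swap annuity: unsupported currency (PLN), real-time requests and malformed tenor/benchmark
    # inputs raise; the valid USD SOFR query returns the mocked series rescaled by 1e4 / 1e8
    # and taken as an absolute value, matching the expected series built below.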
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
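    # Swap term structure: input validation (tenors, benchmark, holiday pricing date), then
    # forward-tenor and swap-tenor term structures built from the mocked asset lookup and
    # market-data frames.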
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
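    # Basis swap term structure: input validation as above, then forward-tenor and swap-tenor
    # term structures built from the mocked 'basisSwapRate' frames.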
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
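    # _var_swap_tenors reads the available tenor values from the dataset catalog response
    # and raises MqError when none are returned.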
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
def _mock_var_swap_data(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
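    # Forward-starting variance swap: an invalid forward argument raises, the regular case is
    # computed from the mocked 1y and 13m marks, and missing data or tenors yield empty series.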
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
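    # Forward implied volatility computed from the mocked tenor curve for equity and FX assets;
    # empty results when data or a required tenor is missing, and real-time requests raise.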
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
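    # Bucketized power prices for PJM, CAISO and MISO across the supported buckets; unsupported
    # buckets/granularities raise and missing market data yields an empty series.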
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_forward_price():
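    # Forward prices across contract ranges and buckets; invalid ranges/buckets raise, and a
    # missing bucket mark or missing market data yields an empty series.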
# US Power
target = {
'7x24': [19.46101],
'peak': [23.86745],
'J20 7x24': [18.11768888888889],
'J20-K20 7x24': [19.283921311475414],
'J20-K20 offpeak': [15.82870707070707],
'J20-K20 7x8': [13.020144262295084],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_forward_price)
mock_spp = Index('MA001', AssetClass.Commod, 'SPP')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
        actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='offpeak'
)
assert_series_equal(pd.Series(target['J20-K20 offpeak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x8'
)
assert_series_equal(pd.Series(target['J20-K20 7x8'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='lmp',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='5Q20',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='Invalid',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='3H20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='F20-I20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2H20',
bucket='7x24',
real_time=True
)
replace.restore()
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_missing_bucket_forward_price)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
    # Should return empty series as mark for '7x8' bucket is missing
    actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
    assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual), check_names=False)
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_natgas_forward_price():
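    # Natural gas hub forward prices for single- and multi-month contract ranges; invalid
    # month codes raise and missing market data yields an empty series.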
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_natgas_forward_price)
mock = CommodityNaturalGasHub('MA001', 'AGT')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21'))
expected = pd.Series([2.880], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21-G21'))
expected = pd.Series([2.8629152542372878], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='F21-I21')
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='I21')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock,
price_method='GDD',
contract_range='F21')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_get_iso_data():
tz_map = {'MISO': 'US/Central', 'CAISO': 'US/Pacific'}
for key in tz_map:
assert (tm._get_iso_data(key)[0] == tz_map[key])
def test_string_to_date_interval():
assert (tm._string_to_date_interval("K20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("K20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("k20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("k20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("Cal22")['start_date'] == datetime.date(2022, 1, 1))
assert (tm._string_to_date_interval("Cal22")['end_date'] == datetime.date(2022, 12, 31))
assert (tm._string_to_date_interval("Cal2012")['start_date'] == datetime.date(2012, 1, 1))
assert (tm._string_to_date_interval("Cal2012")['end_date'] == datetime.date(2012, 12, 31))
assert (tm._string_to_date_interval("Cal53")['start_date'] == datetime.date(1953, 1, 1))
assert (tm._string_to_date_interval("Cal53")['end_date'] == datetime.date(1953, 12, 31))
assert (tm._string_to_date_interval("2010")['start_date'] == datetime.date(2010, 1, 1))
assert (tm._string_to_date_interval("2010")['end_date'] == datetime.date(2010, 12, 31))
assert (tm._string_to_date_interval("3Q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3Q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2h2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2h2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("3q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2H2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2H2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("Mar2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("Mar2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("March2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("March2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("5Q20") == "Invalid Quarter")
assert (tm._string_to_date_interval("HH2021") == "Invalid num")
assert (tm._string_to_date_interval("3H2021") == "Invalid Half Year")
assert (tm._string_to_date_interval("Cal2a") == "Invalid year")
assert (tm._string_to_date_interval("Marc201") == "Invalid date code")
assert (tm._string_to_date_interval("M1a2021") == "Invalid date code")
assert (tm._string_to_date_interval("Marcha2021") == "Invalid date code")
assert (tm._string_to_date_interval("I20") == "Invalid month")
assert (tm._string_to_date_interval("20") == "Unknown date code")
def test_implied_vol_commod():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_implied_volatility)
mock = Index('MA001', AssetClass.Commod, 'Option NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.implied_volatility(mock,
tenor='F21-H21')
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
def test_fair_price():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
mock2 = Swap('MA002', AssetClass.Commod, 'Swap Oil')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock,
tenor='F21')
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.fair_price(mock,
tenor=None)
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price_swap)
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock2)
assert_series_equal(pd.Series([2.880],
index=[pd.Timestamp('2019-01-02')],
name='fairPrice'),
pd.Series(actual),
)
replace.restore()
def test_weighted_average_valuation_curve_for_calendar_strip():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21-H21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='Invalid',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F20-I20',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='3H20',
query_type=QueryType.PRICE,
measure_field='fairPrice'
)
replace.restore()
def test_fundamental_metrics():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
period = '1y'
direction = tm.FundamentalMetricPeriodDirection.FORWARD
actual = tm.dividend_yield(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.dividend_yield(..., period, direction, real_time=True)
actual = tm.earnings_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share(..., period, direction, real_time=True)
actual = tm.earnings_per_share_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share_positive(..., period, direction, real_time=True)
actual = tm.net_debt_to_ebitda(mock_spx, period, direction)
assert_series_equal( | pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric') | pandas.Series |
from natsort import natsorted
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
def load_TargetScan(gene):
# Load TargetScan predictions for gene
return set(open("data/TargetScan_{}.txt".format(gene)).read().strip().split("\n"))
def load_paired_tables(path):
# Open isomiR / mRNA TCGA tables and select paired samples
df_isomiR = pd.read_csv("{}/isomiR_normal.tsv".format(path), sep="\t", index_col=0)
df_mRNA = pd.read_csv("{}/mRNA_normal.tsv".format(path), sep="\t", index_col=0)
paired_samples = natsorted(list(set(df_isomiR.columns).intersection(df_mRNA.columns)))
df_isomiR = df_isomiR[paired_samples]
df_mRNA = df_mRNA[paired_samples]
return df_isomiR, df_mRNA
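# Minimal usage sketch ("TCGA-LUAD" is a placeholder project folder, not taken from this
# script; it only illustrates the expected directory layout under data/TCGA/):
def _example_load_paired_tables():
    df_isomiR, df_mRNA = load_paired_tables("data/TCGA/TCGA-LUAD")
    # after loading, both tables contain exactly the shared, naturally sorted samples
    assert list(df_isomiR.columns) == list(df_mRNA.columns)
    return df_isomiR.shape, df_mRNA.shape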
def load_tables_for_organ(organ):
# Merge all TCGA samples for specified organ
global organs_projects, isomiR_thr_quantile
dfs_isomiR, dfs_mRNA = [], []
for project in organs_projects[organ]:
df_isomiR, df_mRNA = load_paired_tables("data/TCGA/{}".format(project))
# Select highly expressed isomiR's
medians = df_isomiR.median(axis=1)
isomiR_thr = np.quantile(medians[medians > 0], isomiR_thr_quantile)
df_isomiR = df_isomiR.loc[ df_isomiR.median(axis=1) >= isomiR_thr ]
dfs_isomiR.append(df_isomiR)
dfs_mRNA.append(df_mRNA)
common_isomiRs = set.intersection(*[set(df_isomiR.index) for df_isomiR in dfs_isomiR])
dfs_isomiR = [df_isomiR.loc[common_isomiRs] for df_isomiR in dfs_isomiR]
df_isomiR = | pd.concat(dfs_isomiR, axis=1) | pandas.concat |
# use selenium to simulate web browser (need to download selenium or create a docker image)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
import requests
import pandas as pd
from datetime import datetime
CURRENT_YEAR = datetime.now().year
def load_candidates(path):
    '''
    Takes a path to a csv file and returns a Pandas DataFrame with all the information
    '''
    return pd.read_csv(path)
def login(driver, username, password):
'''
Logs into website
Notes:
        Make sure to include the Firefox/Chrome driver within your PATH variable
'''
u = driver.find_element_by_name('UserID')
u.clear()
u.send_keys(username)
pw = driver.find_element_by_name('PassPhrase')
pw.clear()
pw.send_keys(password)
try:
driver.find_element_by_name("btnAction").click()
except Exception as e:
print(e)
def goto_election_page(driver):
'''
    Enters the election page for the current semester
    Notes:
        Searches through both fall and spring semester election links and selects the current semester;
        links returns 3 elements: 0: fall, 1: spring, 2: officer elections
'''
links = driver.find_elements_by_partial_link_text('Election')
links[1].click() # temp for now
''' Need to replace links[1].click() with this more general statement
for link in links:
if CURRENT_YEAR in link:
link.click()
'''
def check_boxes(driver, classification, candidates):
'''
Goes through all pages and checks all boxes
'''
if classification == 'Junior':
driver.find_element_by_xpath("//input[@value='Continue to Juniors']").click()
elif classification == 'Senior':
driver.find_element_by_xpath("//input[@value='Continue to Seniors']").click()
else:
return
candidate_count = 0
while True:
for i in range(20):
# s = Select(driver.find_element_by_xpath)('//input[@value=' + candidates['email'][i] + ']')
# candidate_count += 1
try:
s = Select(driver.find_element_by_name("Rejected" + str(i + 1)))
s.select_by_value('A2')
except Exception:
continue
try:
driver.find_elements_by_name('btnAction')[0].click()
# driver.find_element_by_xpath("//input[@value='Continue']")
except Exception:
break
driver.find_elements_by_name('btnAction')[1].click()
# driver.find_element_by_xpath("//input[@name='btnAction' and @value='Back']")
if __name__ == '__main__':
candidates = | DataFrame.read_csv('../spreadsheets/candidates.csv') | pandas.DataFrame.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta
import sys
import os
import unittest
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, DatetimeIndex,
Int64Index, to_datetime, bdate_range)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import(
range, long, StringIO, lrange, lmap, map, zip, cPickle as pickle, product
)
from pandas import read_pickle
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
from pandas.core.datetools import BDay
import pandas.core.common as com
from pandas import concat
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
# unfortunately, too much has changed to handle these legacy pickles
# class TestLegacySupport(unittest.TestCase):
class LegacySupport(object):
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls):
if compat.PY3:
raise nose.SkipTest("not compatible with Python >= 3")
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'frame.pickle')
with open(filepath, 'rb') as f:
cls.frame = pickle.load(f)
filepath = os.path.join(pth, 'data', 'series.pickle')
with open(filepath, 'rb') as f:
cls.series = pickle.load(f)
def test_pass_offset_warn(self):
buf = StringIO()
sys.stderr = buf
DatetimeIndex(start='1/1/2000', periods=10, offset='H')
sys.stderr = sys.__stderr__
def test_unpickle_legacy_frame(self):
dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005',
freq=BDay(1))
unpickled = self.frame
self.assertEquals(type(unpickled.index), DatetimeIndex)
self.assertEquals(len(unpickled), 10)
self.assert_((unpickled.columns == Int64Index(np.arange(5))).all())
self.assert_((unpickled.index == dtindex).all())
self.assertEquals(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_series(self):
from pandas.core.datetools import BDay
unpickled = self.series
dtindex = DatetimeIndex(start='1/3/2005', end='1/14/2005',
freq=BDay(1))
self.assertEquals(type(unpickled.index), DatetimeIndex)
self.assertEquals(len(unpickled), 10)
self.assert_((unpickled.index == dtindex).all())
self.assertEquals(unpickled.index.offset, BDay(1, normalize=True))
def test_unpickle_legacy_len0_daterange(self):
pth, _ = os.path.split(os.path.abspath(__file__))
filepath = os.path.join(pth, 'data', 'series_daterange0.pickle')
result = pd.read_pickle(filepath)
ex_index = DatetimeIndex([], freq='B')
self.assert_(result.index.equals(ex_index))
tm.assert_isinstance(result.index.freq, offsets.BDay)
self.assertEqual(len(result), 0)
def test_arithmetic_interaction(self):
index = self.frame.index
obj_index = index.asobject
dseries = Series(rand(len(index)), index=index)
oseries = Series(dseries.values, index=obj_index)
result = dseries + oseries
expected = dseries * 2
tm.assert_isinstance(result.index, DatetimeIndex)
assert_series_equal(result, expected)
result = dseries + oseries[:5]
expected = dseries + dseries[:5]
tm.assert_isinstance(result.index, DatetimeIndex)
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim a feature with a low sum, e.g. ngrams with a total count lower than 5
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
# Chapter 7.1.1. method to trim features with a low occurrence over all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
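# Illustrative sketch (toy data, not from the corpus): columns that are non-null in at
# most n articles are dropped, the remaining columns are kept unchanged.
def _example_trim_by_occurrence():
    toy = pd.DataFrame({'der': [3, 1, 2], 'rar': [1, None, None], 'und': [2, 2, None]})
    trimmed = trim_df_by_occurrence(toy, 1)
    # 'rar' occurs in only one article and is removed; 'der' and 'und' remain
    return trimmed.columns.tolist()  # ['der', 'und']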
# Chapter 7.1.1. Process of filtering the data with low occurrence and save the filtered features in a new file
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
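# Illustrative sketch (toy data): columns whose document frequency reaches the threshold
# are treated as too common and removed.
def _example_trim_by_doc_freq():
    toy = pd.DataFrame({'der': [3, 1, 2, 1], 'selten': [1, None, None, None]})
    trimmed = trim_df_by_doc_freq(toy, 0.5)
    # 'der' appears in 100% of the articles (>= 0.5) and is dropped,
    # 'selten' appears in 25% of the articles and is kept
    return trimmed.columns.tolist()  # ['selten']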
# Chapter 7.1.2 Process of filtering the data with high document frequency and save the filtered features in a new file
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
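# Illustrative sketch (toy data): every count row is divided by the matching entry of the
# length-metric vector, which yields per-article relative frequencies.
def _example_rel_frequency():
    counts = pd.DataFrame({'der': [2, 5], 'und': [1, 5]})
    word_counts = pd.Series([10, 50])
    rel = get_rel_frequency(counts, word_counts)
    # first article: der=0.2, und=0.1; second article: der=0.1, und=0.1
    return rel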
# Chapter 7.1.4. whole process of the chapter: calculate the individual relative frequency of each feature,
# compare the correlation with the article length between the absolute and the relative variant and save the
# features with the estimated relative frequency in a new file
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
            # correlation is calculated only on the training part of the 60/40 split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
            # Calculate the mean correlation
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
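# Illustrative sketch (random toy data; 'f1'..'f3' are made-up feature names): the returned
# frame lists every feature, best-scoring first.
def _example_sort_features():
    rng = np.random.RandomState(42)
    x_toy = pd.DataFrame(rng.rand(40, 3), columns=['f1', 'f2', 'f3'])
    y_toy = pd.Series(rng.randint(0, 2, 40))
    ranking = sort_features_by_score(x_toy, y_toy, mutual_info_classif)
    return ranking['feature_names'].tolist()  # all three names, ordered by score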
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
    best_perc_round = par_iter - 1  # If no other point is found, use the highest number of features (-1 because the list index starts at 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not at least 2% left for the tradeoff there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
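# Illustrative sketch of the stopping rule used above on a synthetic accuracy curve (the
# numbers are made up): from index 2 on the accuracy never increases over five steps, so
# the search would stop and keep index 2, i.e. 3% of the features with a step of 1.0.
def _example_stopping_rule():
    result_list = [0.60, 0.68, 0.70, 0.70, 0.69, 0.69, 0.68, 0.68]
    difference_list_p2p = [result_list[p + 1] - result_list[p]
                           for p in range(len(result_list) - 6, len(result_list) - 1)]
    return all(point_y <= 0 for point_y in difference_list_p2p)  # True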
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
    best_perc_round = par_iter - 1  # If no other point is found, use the highest number of features (-1 because the list index starts at 0)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not at least 2% left for the tradeoff there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
    best_perc_round = par_iter - 1  # If no other point is found, use the highest number of features (-1 because the list index starts at 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
        # Some values from 3 to the square root of the sample count
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
        if int(math.sqrt(len(x_new_training.index))) not in neighbors:
neighbors.append(int(math.sqrt(len(x_new_training.index))))
        # Not more neighbors than samples-2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not at least 2% left for the tradeoff there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
    # select the number of features: one percent of the feature count (len/100) multiplied by par_percent
number_features = round(par_percent * (len(par_x.columns) / 100))
    # If the computed number of features is less than 1, keep at least one feature
number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
    if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as f:
sys.stdout = f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 8.1. and later, basically the process of the iterative filter only with the svc classifier
def iterative_filter_process_svm(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
    if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for svc
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
sys.stdout = out_f
print(f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n")
sys.stdout = original_stdout
# draw diagram
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.axis([step_perc, (len(result_list_svc) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, len(result_list_svc) + 1)], columns=['percent'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_accuracy = pd.concat([df_percent, df_svc], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 7.2.1. Get the accuracy of the features before the iterative filter, results in table 18
def get_accuracy_before_iterative_filter():
gnb_result_list, svc_result_list, knn_result_list, gnb_time_list, svc_time_list, knn_time_list \
= [], [], [], [], [], []
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
path = f'daten/5_iterative_filter/csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# Get the feature names for the table
feature_list = [re.search("(.+?(?=_rel))", f).group(1) for f in files]
for f in files:
print(f)
x = pd.read_csv(f"daten/5_iterative_filter/csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
gnb_result_list.append(score)
gnb_time_list.append(time_taken)
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC time for {f}: {time_taken}")
svc_result_list.append(score)
svc_time_list.append(time_taken)
# Parameter for KNN
        # Some values from 3 to the square root of the feature count
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN time for {f}: {time_taken}")
knn_result_list.append(score)
knn_time_list.append(time_taken)
# create dataframe with the scores and times
df_results = pd.DataFrame()
df_results['feature'] = feature_list
df_results['score_gnb'] = gnb_result_list
df_results['time_gnb'] = gnb_time_list
df_results['score_svc'] = svc_result_list
df_results['time_svc'] = svc_time_list
df_results['score_knn'] = knn_result_list
df_results['time_knn'] = knn_time_list
return df_results
# Chapter 7.2.1. Get the accuracy of the features after the iterative filter, results in table 18
def get_accuracy_after_iterative_filter():
df_gnb_result = pd.DataFrame(columns=['feature', 'score_gnb', 'time_gnb'])
df_svc_result = pd.DataFrame(columns=['feature', 'score_svc', 'time_svc'])
df_knn_result = pd.DataFrame(columns=['feature', 'score_knn', 'time_knn'])
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
# path = f'daten/5_iterative_filter/csv_after_filter'
path = f'daten/5_iterative_filter/5_iterative_filter/csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
for f in files:
print(f)
# Get the feature name for the table
feature = re.search(".{4}(.+?(?=_rel))", f).group(1)
# x = pd.read_csv(f"daten/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x = pd.read_csv(f"daten/5_iterative_filter/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8",
nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# Select the classifier by the start of the filename
if f.split("_")[0] == "gnb":
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
df_gnb_result = df_gnb_result.append(pd.DataFrame(data={'feature': [feature], 'score_gnb': [score],
'time_gnb': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "svc":
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC training time for {f}: {time_taken}")
df_svc_result = df_svc_result.append(pd.DataFrame(data={'feature': [feature], 'score_svc': [score],
'time_svc': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "knn":
# Parameter for KNN
            # Some values from 3 to the square root of the feature count
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
start_time = datetime.now()
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN test time for {f}: {time_taken}")
df_knn_result = df_knn_result.append(pd.DataFrame(data={'feature': [feature], 'score_knn': [score],
'time_knn': [time_taken]}), ignore_index=True)
df_merge = pd.merge(df_gnb_result, df_knn_result, on="feature", how='outer')
df_merge = pd.merge(df_merge, df_svc_result, on="feature", how='outer')
return df_merge
# Get n articles per author for a given number of authors. Required for setups with different numbers of authors and articles
def get_n_article_by_author(par_df, par_label_count, par_article_count):
df_articles = pd.DataFrame(columns=['label_encoded', 'text'])
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if list_article_count[labels.index(row['label_encoded'])] != 0:
d = {'label_encoded': [row['label_encoded']], 'text': [row['text']]}
df_articles = df_articles.append(pd.DataFrame.from_dict(d), ignore_index=True)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return df_articles
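# Illustrative sketch (toy frame with two authors, labels 1 and 2): the function picks the
# first two texts per author and stops once every per-author quota is filled.
def _example_n_article_by_author():
    toy = pd.DataFrame({'label_encoded': [1, 1, 1, 2, 2, 2],
                        'text': ['a1', 'a2', 'a3', 'b1', 'b2', 'b3']})
    subset = get_n_article_by_author(toy, 2, 2)
    return subset['text'].tolist()  # ['a1', 'a2', 'b1', 'b2']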
# Return indices for n articles per author for a given number of authors. Required for setups with different
# numbers of authors and articles
def get_n_article_index_by_author(par_df, par_label_count, par_article_count):
index_list = []
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if row['label_encoded'] in labels:
if list_article_count[labels.index(row['label_encoded'])] != 0:
index_list.append(index)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return index_list
# Method to estimate the f1 score of the test data for GNB
def get_f1_for_gnb(par_x_train, par_x_test, par_y_train, par_y_test):
gnb = GaussianNB()
# GNB fit
gnb.fit(par_x_train, par_y_train)
# score on test data
gnb_score = f1_score(gnb.predict(par_x_test), par_y_test, average='micro')
return gnb_score
# Method to estimate the f1 score of the test data for SVC
def get_f1_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = f1_score(svc.predict(par_x_test), par_y_test, average='micro')
return svc_score
# Method to estimate the f1 score of the test data for KNN
def get_f1_for_knn(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# define param grid for knn, neighbors has the be lower than samples
neighbors = [1, 3, 5, 11, 19, 36, 50]
# number of neighbors must be less than number of samples
neighbors = [x for x in neighbors if x < len(par_x_test)]
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(par_x_train, par_y_train)
# predict test data
knn_score = f1_score(knn.predict(par_x_test), par_y_test, average='micro')
return knn_score
# Method to estimate the accuracy of the test data for SVC
def get_accuracy_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = accuracy_score(svc.predict(par_x_test), par_y_test)
return svc_score
# Chapter 7.3.1. comparison of the word length feature alternatives
def compare_word_length_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'wl_matrix_gnb': [], 'wl_matrix_svc': [], 'wl_matrix_knn': [],
'wl_matrix_bins_20_30_gnb': [], 'wl_matrix_bins_20_30_svc': [], 'wl_matrix_bins_20_30_knn': [],
'wl_matrix_bins_10_20_gnb': [], 'wl_matrix_bins_10_20_svc': [], 'wl_matrix_bins_10_20_knn': [],
'wl_matrix_20_gnb': [], 'wl_matrix_20_svc': [], 'wl_matrix_20_knn': [],
'wl_avg_gnb': [], 'wl_avg_svc': [], 'wl_avg_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
word_count = get_word_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["wl_matrix", "wl_matrix_bins_20_30", "wl_matrix_bins_10_20", "wl_avg", "wl_matrix_20"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "wl_matrix":
x = get_rel_frequency(get_word_length_matrix(df_article).fillna(value=0), word_count['word_count'])
elif feature == "wl_matrix_bins_20_30":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 20, 30).fillna(value=0),
word_count['word_count'])
elif feature == "wl_matrix_bins_10_20":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 10, 20).fillna(value=0),
word_count['word_count'])
elif feature == "wl_avg":
x = get_average_word_length(df_article)
elif feature == "wl_matrix_20":
x = get_word_length_matrix_with_margin(df_article, 20)
            # Scale the data, otherwise high counts in wl_matrix can dominate and the hyperparameter
            # optimization for svc takes a while because of the small differences from the average
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
            y = df_article['label_encoded']
            x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.2. comparison of the digit feature alternatives
def compare_digit_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'digit_sum_gnb': [], 'digit_sum_svc': [], 'digit_sum_knn': [],
'digits_gnb': [], 'digits_svc': [], 'digits_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
        # Get the char count for the individual relative frequency
char_count = get_char_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["digit_sum", "digits"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "digit_sum":
x = get_rel_frequency(get_sum_digits(df_article).fillna(value=0), char_count['char_count'])
elif feature == "digits":
x = get_rel_frequency(get_digits(df_article).fillna(value=0), char_count['char_count'])
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
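# Hedged usage sketch (not part of the original pipeline; the output path is an
# illustrative assumption): every compare_* function above returns one row per
# "texts per author" setting, so a typical driver only has to persist the result:
#   df_digit_results = compare_digit_features()
#   df_digit_results.to_csv("daten/results/digit_features.csv", index=False)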
# Chapter 7.3.3. comparison of the word ngrams with n 4-6
def compare_word_4_6_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w4g_gnb': [], 'w4g_svc': [], 'w4g_knn': [],
'w5g_gnb': [], 'w5g_svc': [], 'w5g_knn': [],
'w6g_gnb': [], 'w6g_svc': [], 'w6g_knn': []}
# load the data
df_w4g = pd.read_csv("daten/6_feature_analysis/input_data/word_4_gram_rel.csv", sep=',', encoding="utf-8")
df_w5g = pd.read_csv("daten/6_feature_analysis/input_data/word_5_gram_rel.csv", sep=',', encoding="utf-8")
df_w6g = pd.read_csv("daten/6_feature_analysis/input_data/word_6_gram_rel.csv", sep=',', encoding="utf-8")
for author_texts in list_author_texts:
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# Get the scores for every feature
for feature in ["w4g", "w5g", "w6g"]:
# select the indices from the article rows by the given indices
if feature == "w4g":
x = df_w4g.iloc[index_list]
elif feature == "w5g":
x = df_w5g.iloc[index_list]
elif feature == "w6g":
x = df_w6g.iloc[index_list]
# Delete features which only occur once
x = trim_df_by_occurrence(x, 1)
            # reset the indices to get an order from 0 to authors * texts per author - 1
x = x.reset_index(drop=True)
y = df_all_texts.iloc[index_list]['label_encoded']
y = y.reset_index(drop=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 2-3
def compare_word_2_3_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w2g_gnb': [], 'w2g_svc': [], 'w2g_knn': [],
'w3g_gnb': [], 'w3g_svc': [], 'w3g_knn': []}
for author_texts in list_author_texts:
        print(f"Texts per author: {author_texts}")
        # indices for articles of n authors with m texts each
        index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# select the indices from the article rows by the given indices
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
        print(f"Articles: {len(df_balanced.index)}")
# extract the features
df_w2g = get_word_n_grams(df_balanced, 2)
df_w3g = get_word_n_grams(df_balanced, 3)
# Preprocessing steps
word_count = get_word_count(df_balanced)
df_w2g = preprocessing_steps_pos_tag_n_grams(df_w2g, word_count['word_count'])
df_w3g = preprocessing_steps_pos_tag_n_grams(df_w3g, word_count['word_count'])
        # Scale the data; otherwise the SVM takes very long with very small feature values.
scaler = StandardScaler()
df_w2g[df_w2g.columns] = scaler.fit_transform(df_w2g[df_w2g.columns])
df_w3g[df_w3g.columns] = scaler.fit_transform(df_w3g[df_w3g.columns])
label = df_balanced['label_encoded']
# Train/Test 60/40 split
df_w2g_train, df_w2g_test, df_w3g_train, df_w3g_test, label_train, label_test = \
train_test_split(df_w2g, df_w3g, label, test_size=0.4, random_state=42, stratify=label)
# Get the scores for every feature
for feature in ["w2g", "w3g"]:
# select the indices from the article rows by the given indices
# iterative filter
# returns df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
if feature == "w2g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w2g_train, df_w2g_test, label_train, 1.0, mutual_info_classif)
elif feature == "w3g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w3g_train, df_w3g_test, label_train, 1.0, mutual_info_classif)
            # Do not use the iterative filter for GNB because it produced poor results;
            # use the unfiltered matrix of the current feature instead
            x_gnb_train, x_gnb_test, label_train, label_test = \
                train_test_split(df_w2g if feature == "w2g" else df_w3g, label,
                                 test_size=0.4, random_state=42, stratify=label)
print(f"cv: {cv}")
print(f"Train Labels: {label_train.value_counts()}")
print(f"Test Labels: {label_test.value_counts()}")
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.4. comparison of the different lengths of char ngrams
# Chapter 7.3.4. whole process of the comparison of the char-n-gram features
def compare_char_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
extract_n_gram_features_to_csv(df_balanced, par_base_path, number_authors, number_texts)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
compare_char_affix_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_affix_n_grams.csv", index=False)
compare_char_word_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_word_n_grams.csv", index=False)
compare_char_punct_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_punct_n_grams.csv", index=False)
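# Directory layout assumed by the char-n-gram process above (inferred from the
# read/write calls; adjust to the local setup):
#   <par_base_path>csv_before_filter/  raw n-gram matrices per (authors, texts) combination
#   <par_base_path>csv_after_filter/   matrices after the iterative filter, one file per classifier
#   <par_base_path>results/            aggregated F1 comparison tables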
# Chapter 7.3.4. char-affix-ngrams
def compare_char_affix_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_affix_2_gnb': [], 'c_affix_2_svc': [], 'c_affix_2_knn': [],
'c_affix_3_gnb': [], 'c_affix_3_svc': [], 'c_affix_3_knn': [],
'c_affix_4_gnb': [], 'c_affix_4_svc': [], 'c_affix_4_knn': [],
'c_affix_5_gnb': [], 'c_affix_5_svc': [], 'c_affix_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_affix_2", "c_affix_3", "c_affix_4", "c_affix_5"]:
# read the data based on n, texts and authors
                n = feature.rsplit("_", 1)[1]
                df_gnb = pd.read_csv(f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
                                     f"_char_affix_{n}_gram_filtered.csv")
                df_svc = pd.read_csv(f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
                                     f"_char_affix_{n}_gram_filtered.csv")
                df_knn = pd.read_csv(f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
                                     f"_char_affix_{n}_gram_filtered.csv")
                # Scale the data; otherwise the SVM takes very long with very small feature values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
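# Note: each classifier reads its own "<clf>_a<authors>_t<texts>_..._filtered.csv"
# file because the iterative filter selects a (possibly different) feature subset
# per classifier; the same pattern is reused for the char-word, char-punct and
# POS-tag comparisons below.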
# Chapter 7.3.4. char-word-ngrams
def compare_char_word_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_word_2_gnb': [], 'c_word_2_svc': [], 'c_word_2_knn': [],
'c_word_3_gnb': [], 'c_word_3_svc': [], 'c_word_3_knn': [],
'c_word_4_gnb': [], 'c_word_4_svc': [], 'c_word_4_knn': [],
'c_word_5_gnb': [], 'c_word_5_svc': [], 'c_word_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_word_2", "c_word_3", "c_word_4", "c_word_5"]:
# read the data based on n, texts and authors
                n = feature.rsplit("_", 1)[1]
                df_gnb = pd.read_csv(f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
                                     f"_char_word_{n}_gram_filtered.csv")
                df_svc = pd.read_csv(f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
                                     f"_char_word_{n}_gram_filtered.csv")
                df_knn = pd.read_csv(f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
                                     f"_char_word_{n}_gram_filtered.csv")
                # Scale the data; otherwise the SVM takes very long with very small feature values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-punct-ngrams
def compare_char_punct_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_punct_2_gnb': [], 'c_punct_2_svc': [], 'c_punct_2_knn': [],
'c_punct_3_gnb': [], 'c_punct_3_svc': [], 'c_punct_3_knn': [],
'c_punct_4_gnb': [], 'c_punct_4_svc': [], 'c_punct_4_knn': [],
'c_punct_5_gnb': [], 'c_punct_5_svc': [], 'c_punct_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_punct_2", "c_punct_3", "c_punct_4", "c_punct_5"]:
# read the data based on n, texts and authors
                n = feature.rsplit("_", 1)[1]
                df_gnb = pd.read_csv(f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
                                     f"_char_punct_{n}_gram_filtered.csv")
                df_svc = pd.read_csv(f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
                                     f"_char_punct_{n}_gram_filtered.csv")
                df_knn = pd.read_csv(f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
                                     f"_char_punct_{n}_gram_filtered.csv")
                # Scale the data; otherwise the SVM takes very long with very small feature values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. Print the char-n-gram features in different files
def extract_n_gram_features_to_csv(par_df, par_base_path, par_number_authors, par_number_texts):
char_count = get_char_count(par_df)
# n from 2-5
for n in range(2, 6):
ca_ng = get_char_affix_n_grams(par_df, n)
preprocessing_steps_char_n_grams(ca_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_affix_{n}_gram.csv", index=False)
cw_ng = get_char_word_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_word_{n}_gram.csv", index=False)
cp_ng = get_char_punct_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cp_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_punct_{n}_gram.csv", index=False)
print(f"Extraction Round {n - 1} done")
return True
# combined preprocessing steps of the pos-tag-n-grams
def preprocessing_steps_pos_tag_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_by_occurrence(par_feature, 1)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
# combined preprocessing steps of the char-n-grams
def preprocessing_steps_char_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_sum_feature(par_feature, 5)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
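# Illustration of the individual relative frequency step (hypothetical numbers):
# assuming get_rel_frequency divides each row by the length metric of its text,
# a text with char_count = 2000 and 10 occurrences of a char n-gram is mapped to
# 10 / 2000 = 0.005, which makes texts of different lengths comparable.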
# Feature selection with the iterative filter without printing the results in a file
def feature_selection_iterative_filter(par_x_train, par_x_test, par_y_train, par_step, par_classif):
df_sorted_features = sort_features_by_score(par_x_train, par_y_train, par_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb = get_best_percentile_gnb(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_svc = get_best_percentile_svc(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_knn = get_best_percentile_knn(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
    # keep the top round(best_percentile * (number of features / 100)) features
# select the best features from the original dataset
df_x_train_gnb = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_gnb = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_svc = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_svc = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_knn = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_knn = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
return df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
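# Worked example for the percentile selection above (hypothetical numbers): with
# 2000 candidate features and best_perc_svc = 10, round(10 * (2000 / 100)) = 200,
# so the 200 highest-scoring features from df_sorted_features are kept for the SVC
# train/test sets; GNB and KNN get their own, possibly different, cut-offs.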
# Chapter 7.3.5. function to compare the pos-tag-n-grams
def compare_pos_tag_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'pos_2_gnb': [], 'pos_2_svc': [], 'pos_2_knn': [],
'pos_3_gnb': [], 'pos_3_svc': [], 'pos_3_knn': [],
'pos_4_gnb': [], 'pos_4_svc': [], 'pos_4_knn': [],
'pos_5_gnb': [], 'pos_5_svc': [], 'pos_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["pos_2", "pos_3", "pos_4", "pos_5"]:
# read the data based on n, texts and authors
                n = feature.rsplit("_", 1)[1]
                df_gnb = pd.read_csv(f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
                                     f"_pos_tag_{n}_gram_filtered.csv")
                df_svc = pd.read_csv(f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
                                     f"_pos_tag_{n}_gram_filtered.csv")
                df_knn = pd.read_csv(f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
                                     f"_pos_tag_{n}_gram_filtered.csv")
                # Scale the data; otherwise the SVM takes very long with very small feature values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.5. complete process of the pos-tag-n-grams comparison
def compare_pos_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
word_count = get_word_count(df_balanced)
# extract features and preprocessing
for n in range(2, 6):
pt_ng = get_pos_tags_n_grams(df_balanced, n)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_before_filter/"
f"a{number_authors}_t{number_texts}_pos_tag_{n}_gram.csv", index=False)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
            # the 2-grams for SVC are not filtered; overwrite the filtered file with the unfiltered version for SVC
pt_ng = get_pos_tags_n_grams(df_balanced, 2)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_after_filter/"
f"svc_a{number_authors}_t{number_texts}_pos_tag_2_gram_filtered.csv", index=False)
compare_pos_tag_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/pos_tag_n_grams.csv", index=False)
# Method to print all features for different counts of authors and texts
# Including all Preprocessing steps and filtering
def print_all_features_svc(par_base_path, par_article_path):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
author_counts = [2, 3, 4, 5, 10, 15, 25]
text_counts = [5, 10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# get all the features
df_bow = get_bow_matrix(df_balanced)
df_word_2g = get_word_n_grams(df_balanced, 2)
df_word_count = get_word_count(df_balanced)
df_word_length = get_word_length_matrix_with_margin(df_balanced, 20)
df_yules_k = get_yules_k(df_balanced)
sc_label_vector = ["!", "„", "“", "§", "$", "%", "&", "/", "(", ")", "=", "?", "{", "}", "[", "]", "\\",
"@", "#",
"‚", "‘", "-", "_", "+", "*", ".", ",", ";"]
special_char_matrix = get_special_char_matrix(df_balanced, sc_label_vector)
sc_label_vector = ["s_char:" + sc for sc in sc_label_vector]
df_special_char = pd.DataFrame(data=special_char_matrix, columns=sc_label_vector)
df_char_affix_4g = get_char_affix_n_grams(df_balanced, 4)
df_char_word_3g = get_char_word_n_grams(df_balanced, 3)
df_char_punct_3g = get_char_punct_n_grams(df_balanced, 3)
df_digits = get_sum_digits(df_balanced)
df_fwords = get_function_words(df_balanced)
df_pos_tags = get_pos_tags(df_balanced)
df_pos_tag_2g = get_pos_tags_n_grams(df_balanced, 2)
df_start_pos, df_end_pos = get_sentence_end_start(df_balanced)
df_start_end_pos = pd.concat([df_start_pos, df_end_pos], axis=1)
df_fre = get_flesch_reading_ease_vector(df_balanced)
# 7.1.1 Remove low occurrence
df_bow = trim_df_by_occurrence(df_bow, 1)
df_word_2g = trim_df_by_occurrence(df_word_2g, 1)
df_fwords = trim_df_by_occurrence(df_fwords, 1)
df_pos_tag_2g = trim_df_by_occurrence(df_pos_tag_2g, 1)
df_char_affix_4g = trim_df_sum_feature(df_char_affix_4g, 5)
df_char_word_3g = trim_df_sum_feature(df_char_word_3g, 5)
df_char_punct_3g = trim_df_sum_feature(df_char_punct_3g, 5)
# 7.1.2 Remove high frequency
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
df_word_2g = trim_df_by_doc_freq(df_word_2g, 0.5)
df_fwords = trim_df_by_doc_freq(df_fwords, 0.5)
# 7.1.4 individual relative frequency
df_len_metrics = pd.concat([get_char_count(df_balanced), get_sentence_count(df_balanced),
df_word_count], axis=1)
df_bow = get_rel_frequency(df_bow.fillna(value=0), df_len_metrics['word_count'])
df_word_2g = get_rel_frequency(df_word_2g.fillna(value=0), df_len_metrics['word_count'])
df_word_length = get_rel_frequency(df_word_length.fillna(value=0), df_len_metrics['word_count'])
df_special_char = get_rel_frequency(df_special_char.fillna(value=0), df_len_metrics['char_count'])
df_char_affix_4g = get_rel_frequency(df_char_affix_4g.fillna(value=0), df_len_metrics['char_count'])
df_char_word_3g = get_rel_frequency(df_char_word_3g.fillna(value=0), df_len_metrics['char_count'])
df_char_punct_3g = get_rel_frequency(df_char_punct_3g.fillna(value=0), df_len_metrics['char_count'])
df_digits = get_rel_frequency(df_digits.fillna(value=0), df_len_metrics['char_count'])
df_fwords = get_rel_frequency(df_fwords.fillna(value=0), df_len_metrics['word_count'])
df_pos_tags = get_rel_frequency(df_pos_tags.fillna(value=0), df_len_metrics['word_count'])
df_pos_tag_2g = get_rel_frequency(df_pos_tag_2g.fillna(value=0), df_len_metrics['word_count'])
df_start_end_pos = get_rel_frequency(df_start_end_pos.fillna(value=0), df_len_metrics['sentence_count'])
# Print to CSV
# Files for iterative filter
df_bow.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}_bow.csv", index=False)
df_word_2g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_word_2_gram.csv", index=False)
df_char_affix_4g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_affix_4_gram.csv", index=False)
df_char_word_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_word_3_gram.csv", index=False)
df_char_punct_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_punct_3_gram.csv", index=False)
df_fwords.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_function_words.csv", index=False)
# Files not for iterative filter directly in after filter folder
df_word_count.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_count.csv", index=False)
df_word_length.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_length.csv", index=False)
df_yules_k.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_yules_k.csv", index=False)
df_special_char.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_special_char.csv", index=False)
df_digits.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_digits.csv", index=False)
df_pos_tags.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag.csv", index=False)
df_pos_tag_2g.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram.csv", index=False)
df_start_end_pos.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_start_end.csv", index=False)
df_fre.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}_fre.csv", index=False)
print(f"Extraction for {number_authors} authors with {number_texts} texts done. Starting iterative filter")
# Run the iterative filter
iterative_filter_process_svm(par_base_path, df_balanced, number_texts, number_authors)
# create a dataframe with the combined features for a specific number of authors and texts
# features can be excluded by name
def create_df_combined_features(par_path, par_num_texts, par_num_authors, par_exclude):
path = f'{par_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
# exclude a feature by regex
regex = re.compile(f'.*{par_exclude}')
files = [i for i in files if not regex.match(i)]
df_all = pd.DataFrame()
# combine all features
for feature in files:
df_feature = pd.read_csv(f"{par_path}csv_after_filter/{feature}", sep=',', encoding="utf-8")
df_all = pd.concat([df_all, df_feature], axis=1)
return df_all
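# Hedged usage sketch (path, counts and the excluded feature are illustrative
# assumptions): the call below would concatenate, column-wise, every filtered
# feature file whose name starts with "a25_" and contains "t100_", skipping
# files whose name matches ".*bow":
#   df_combined = create_df_combined_features("daten/8_complete_model/", 100, 25, "bow")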
# Chapter 8.4. comparison of normalization and standardization
def compare_normalization_standardization(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_results = {'without': [], 'standard': [], 'normal': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, "nothing")
# standardization of features
df_features_stand = copy.deepcopy(df_features)
scaler = StandardScaler()
df_features_stand[df_features_stand.columns] = \
scaler.fit_transform(df_features_stand[df_features_stand.columns])
# normalization of features
df_features_norm = copy.deepcopy(df_features)
normalizer = Normalizer()
df_features_norm[df_features_norm.columns] = \
normalizer.fit_transform(df_features_norm[df_features_norm.columns])
x_train, x_test, x_train_stand, x_test_stand, x_train_norm, x_test_norm, label_train, label_test = \
train_test_split(df_features, df_features_stand, df_features_norm, label,
test_size=0.4, random_state=42, stratify=label)
# append the results
dic_f1_results['without'].append(get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
dic_f1_results['standard'].append(get_f1_for_svc(x_train_stand, x_test_stand, label_train,
label_test, cv))
dic_f1_results['normal'].append(get_f1_for_svc(x_train_norm, x_test_norm, label_train,
label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_f1_results)
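# Background on the two scalers compared above: StandardScaler standardizes each
# feature column to zero mean and unit variance, whereas Normalizer rescales each
# sample row to unit (L2) norm, so the comparison contrasts column-wise
# standardization and row-wise normalization against the unscaled baseline.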
# Chapter 8.5.1. Comparison of the individual features, data for table 21
def compare_single_features(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_results = {'number_authors': [], 'number_texts': []}
path = f'{par_feature_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# get unique values for the list of the features
feature_list = list(set([re.search(r"a\d+_t\d+_(.+?(?=$))", f).group(1) for f in files]))
for feature in feature_list:
dic_results[feature] = []
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_results['number_authors'].append(number_authors)
dic_results['number_texts'].append(number_texts)
for feature in feature_list:
df_feature = pd.read_csv(
f"{par_feature_path}csv_after_filter/a{number_authors}_t{number_texts}_{feature}")
# standardization of features
scaler = StandardScaler()
df_feature[df_feature.columns] = \
scaler.fit_transform(df_feature[df_feature.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_feature, label, test_size=0.4, random_state=42, stratify=label)
dic_results[feature].append(
get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_results)
# Chapter 8.5.2. Get the values of the difference functions, data for table 22
def get_feature_function_difference(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_wo_feature = {'wo_bow': [], 'wo_word_2_gram': [], 'wo_word_count': [], 'wo_word_length': [],
'wo_yules_k': [], 'wo_special_char': [], 'wo_char_affix': [], 'wo_char_word': [],
'wo_char_punct': [], 'wo_digits': [], 'wo_function_words': [], 'wo_pos_tag.csv': [],
'wo_pos_tag_2_gram': [], 'wo_pos_tag_start_end': [], 'wo_fre': [], 'number_authors': [],
'number_texts': []}
dic_f1_diff_feature = {'diff_bow': [], 'diff_word_2_gram': [], 'diff_word_count': [], 'diff_word_length': [],
'diff_yules_k': [], 'diff_special_char': [], 'diff_char_affix': [], 'diff_char_word': [],
'diff_char_punct': [], 'diff_digits': [], 'diff_function_words': [], 'diff_pos_tag.csv': [],
'diff_pos_tag_2_gram': [], 'diff_pos_tag_start_end': [], 'diff_fre': [],
'number_authors': [],
'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_wo_feature['number_authors'].append(number_authors)
dic_f1_wo_feature['number_texts'].append(number_texts)
dic_f1_diff_feature['number_authors'].append(number_authors)
dic_f1_diff_feature['number_texts'].append(number_texts)
# Read the f1 Score from the previous calculations
df_score_all = pd.read_csv(f"{par_feature_path}/results/compared_stand_normal.csv")
f1_score_all = df_score_all.loc[(df_score_all['number_authors'] == number_authors) &
(df_score_all['number_texts'] == number_texts)]['standard'].iloc[0]
for key in dic_f1_diff_feature:
if key != "number_authors" and key != "number_texts":
key = re.search(r'.+?(?=_)_(.*)', key).group(1)
# exclude the specific feature
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, key)
# standardization of features
scaler = StandardScaler()
df_features[df_features.columns] = \
scaler.fit_transform(df_features[df_features.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_features, label, test_size=0.4, random_state=42, stratify=label)
# append the results
score_wo = get_f1_for_svc(x_train, x_test, label_train, label_test, cv)
dic_f1_wo_feature[f'wo_{key}'].append(score_wo)
dic_f1_diff_feature[f'diff_{key}'].append(f1_score_all - score_wo)
print(f"{key} done for {number_authors} authors and {number_texts} texts.")
return pd.DataFrame(dic_f1_wo_feature), pd.DataFrame(dic_f1_diff_feature)
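# Reading the two returned frames: dic_f1_wo_feature holds the F1 score of the
# combined model with one feature group removed, and dic_f1_diff_feature holds
# f1_score_all - score_wo, so a positive "diff_*" value means that removing the
# feature group lowered the score, i.e. the group contributes useful information.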
# Chapter 8.5.3. Comparison of the model with or without content features, picture 28
def compare_content_features(par_article_path, par_feature_path, par_author_texts, par_authors):
    df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script saves bid and ask data for specified ETFs to files for each day
during market open hours.
It assumes the computer is at US East Coast Time.
@author: mark
"""
import os
import pandas as pd
import numpy as np
from itertools import product
import streamlit as st
from bokeh.plotting import figure
from bokeh.models.tools import HoverTool
from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet
from streamlit_metrics import metric_row
def display_method_to_choose_etfs(selected_method_choose_dates, all_etfs, etf_data, sl_obj):
"""
Generates various streamlit options for selecting which ETFs to display.
Parameters
----------
selected_method_choose_dates : list of str
Strings of the various methods of selecting ETFs.
all_etfs : list of str
List of all ETF tickers.
etf_data : pd.DataFrame
Dataframe containing bulk data about ETFs.
sl_obj : streamlit
        Streamlit object to place the elements in.
Returns
-------
selected_etfs : list of str
List of str tickers chosen by users.
"""
selected_etfs = all_etfs
if 'By volume traded' in selected_method_choose_dates:
selection_data = etf_data['volume (shares/day)']
log_min = float(np.floor(np.log10(selection_data.min())))
log_max = float(np.ceil(np.log10(selection_data.max())))
min_vol, max_vol = sl_obj.slider('Average Volume (shares/day)',
min_value=float(log_min),
max_value=float(log_max),
value=(float(log_min), float(log_max)),
                                             step=float(log_max - log_min) / 100,
format='10^%.1f'
)
selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index))
if 'By market cap' in selected_method_choose_dates:
selection_data = etf_data['net assets (million USD)']
log_min = float(np.floor(np.log10(selection_data.min())))
log_max = float(np.ceil(np.log10(selection_data.max())))
min_vol, max_vol = sl_obj.slider('Market Cap as of 2021-02-21 (million USD)',
min_value=float(log_min),
max_value=float(log_max),
value=(float(log_min), float(log_max)),
                                             step=float(log_max - log_min) / 100,
format='10^%.1f'
)
selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index))
if 'Only ESG ETFs' in selected_method_choose_dates:
esg_etfs = etf_data[etf_data['esg'] == True].index
selected_etfs = list(set(selected_etfs) & set(esg_etfs))
if 'choose specific ETFs' in selected_method_choose_dates:
selected_etfs = sl_obj.multiselect('Which ETFs do you want to look at', list(selected_etfs), ['ESGV','VTI','BND', 'VCEB', 'VSGX'])
return selected_etfs
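# Note on the sliders above: they operate in log10 space (format '10^%.1f'
# displays each tick as a power of ten), so an ETF is kept when its volume or
# market cap lies between 10**min_vol and 10**max_vol.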
def get_averages(data, selected_dates, selected_etfs):
"""
Obtain average values of various ETFs across the trading day.
Parameters
----------
data : pd.DataFrame
data of various days and ETFs.
selected_dates : list of str
list of dates in format YYYY-MM-DD.
selected_etfs : list of str
list of ETF tickers.
Returns
-------
pd.Series
        Data frame of average values of ETFs at various times during the trading day.
"""
potential_columns = product(selected_dates, selected_etfs)
actual_columns = [x for x in potential_columns if x in data.columns]
return data[actual_columns].T.groupby(level=['etf']).mean().T
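# Data layout assumed here (inferred from the groupby and the usage further below):
# `data` carries a two-level column MultiIndex of (date, etf), so selecting the
# existing (date, etf) pairs and grouping the transposed frame by the 'etf' level
# yields one intraday average column per ticker.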
def add_trade_windows(p, t_new, t_old, ymax):
"""
Add trade windows to plot
Parameters
----------
p : Bokeh figure
Figure to add trading windows to.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
ymax : float
Maxs value to extend trading windows.
Returns
-------
None.
"""
source = ColumnDataSource(dict(x=[t_old[0]+0.5*(t_old[1]-t_old[0]),t_new[0]+0.5*(t_new[1]-t_new[0])],
y=[ymax-0.0002, ymax-0.0002 ],
w=[t_old[1]-t_old[0], t_new[1]-t_new[0]],
h =[2,2],
desc=['Old', 'New']))
if ymax > 2:
patch = {'h' : [ (0, ymax), (1, ymax) ],}
source.patch(patch)
boxes = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=0.1,
line_width=0)
boxes_select = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=.2,
line_width=0)
box_rend = p.add_glyph(source, boxes)
box_rend.hover_glyph = boxes_select
tooltips = [('trade window','@desc')]
p.add_tools(HoverTool(tooltips=tooltips, renderers=[box_rend]))
def format_plots(p, ymax=None):
"""
Format bokeh plots for quoted spreads across market times
Parameters
----------
p : Bokeh figure plot
Bokeh plot object to format
    ymax : float, optional
        Maximum y-axis value. The default is None.
Returns
-------
None
"""
if ymax is None:
num_formatter='0.00%'
else:
num_zeros = int(np.log10(1/ymax)-.4)
num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
p.xaxis.formatter = DatetimeTickFormatter(hours='%H:%M')
p.xaxis.axis_label = 'Market Time'
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.autohide = True
def make_multi_etf_plot(selected_etfs, selected_dates, t_new, t_old, quoted_spread):
"""
Make plot with multiple ETF averages
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to obtain averages of. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
Returns
-------
p : Bokeh figure
Plot of multiple ETF averages.
"""
t_all = t_new + t_old
average_data = get_averages(quoted_spread, selected_dates, selected_etfs)
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='quoted Bid-Ask Spread for various ETFs',
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(0, average_data.max().max()+0.0001))
#trading windows
add_trade_windows(p, t_new, t_old, average_data.max().max())
# etf lines
renders = []
for etf in selected_etfs:
renders.append(p.line(average_data.index, average_data[etf],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=1,
# set visual properties for non-selected glyphs
color="grey",
alpha=0.5,
name=etf))
tooltips = [('etf','$name'),
('time','$x{%H:%M}'),
('Bid-Ask spread', '$y{"0.00%"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p, ymax=average_data.max().max()+0.0001)
return p
def make_single_etf_plot(selected_etf, selected_dates, t_new, t_old, quoted_spread, supress_hover_after= 10000):
"""
Plots data for a single ETF for multiple days.
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to plot. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
supress_hover_after : int, optional
Do not show hover functionality if there are more than this number of days. The default is 10000.
Returns
-------
p : Bokeh figure
Plot of single ETF over various days.
"""
t_all = t_new + t_old
average_data = get_averages(quoted_spread, selected_dates, [selected_etf])
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='Quoted spread for {}'.format(selected_etf),
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(0, average_data.max().max()+0.0001))
add_trade_windows(p, t_new, t_old, average_data.max().max())
# etf lines
renders = []
if len(selected_dates) > 1:
for date in selected_dates:
try:
render = p.line(quoted_spread.index, quoted_spread.loc[:,(date,selected_etf)],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=0.33,
color="grey",
alpha=0.25,
name=date)
except KeyError:
continue
if len(selected_dates) < supress_hover_after:
renders.append(render)
average_name = 'average'
else:
average_name = selected_dates[0]
renders.append(p.line(average_data.index, average_data[selected_etf],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=0.75,
color="black",
alpha=0.5,
name=average_name))
tooltips = [('date','$name'),
('time','$x{%H:%M}'),
('Bid-Ask spread', '$y{"0.00%"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p)
return p
def make_bid_ask_plot(selected_etf, selected_date, t_new, t_old, directory):
"""
Plots bid and ask prices over one trading day for one ETF.
Parameters
----------
selected_etf : str
ETF ticker of data to show.
selected_date : str
Date of data to show. In format YYYY-MM-DD.
    t_new : tuple of timestamps
        Starting and ending timestamp of the new trading window.
    t_old : tuple of timestamps
        Starting and ending timestamp of the old trading window.
directory : str
Folder containing ETF bid and ask price data. File must be in format date_etf.csv.
Returns
-------
p : Bokeh figure
Plot of bid and ask prices.
"""
data = pd.read_csv(os.path.join(directory, '{}_{}.csv'.format(selected_date, selected_etf)), index_col=0)
basetime = pd.to_datetime('2021-01-01') + pd.Timedelta(hours=9, minutes=30)
timedeltas = pd.TimedeltaIndex([pd.Timedelta(seconds=x) for x in data.index])
data.index = timedeltas + basetime
t_all = t_new + t_old
bid = data.bid
ask = data.ask
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='Bid & ask prices for {} on {}'.format(selected_etf, selected_date),
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(min(bid.min(),ask.min())-0.2, max(bid.max(),ask.max())+0.2))
add_trade_windows(p, t_new, t_old, max(bid.max(),ask.max()))
renders = []
renders.append(p.line(bid.index, bid.values,# set visual properties for selected glyphs
hover_color="blue",
hover_alpha=1,
color="blue",
alpha=.5,
name='bid'))
renders.append(p.line(ask.index, ask.values,# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=1,
color="firebrick",
alpha=0.5,
name='ask'))
tooltips = [('type','$name'),
('time','$x{%H:%M}'),
('price', '$y{"$0.00"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p)
p.yaxis.formatter = NumeralTickFormatter(format="$0.00")
return p
def make_relative_fee_amount(selected_ratios, t_new_text = ''):
"""
Generate a bar plot for the ratio of quoted spread to expense ratio.
Parameters
----------
selected_ratios : pd.Series
Data of ratio of quoted spread to expense ratio.
t_new_text : str
Time range to place in title of plot.
Returns
-------
p : Bokeh figure
Produced plot.
"""
p = figure(plot_width=400, plot_height=400,
x_axis_label="ETFs", x_minor_ticks=len(selected_ratios),
toolbar_location='below', title='Ratio of quoted spread to expense ratio {}'.format(t_new_text))
source = ColumnDataSource(dict(x=range(len(selected_ratios)),
top=selected_ratios.values,
desc=selected_ratios.index,))
glyph = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='grey',
line_width=0, fill_alpha=0.5)
glyph_hover = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='firebrick',
line_width=0, fill_alpha=1)
rend = p.add_glyph(source, glyph)
rend.hover_glyph = glyph_hover
labels = LabelSet(x='x', level='glyph', source=source, render_mode='canvas')
tooltips = [('etf','@desc'),
('ratio','@top')]
p.add_tools(HoverTool(tooltips=tooltips, renderers=[rend]))
num_zeros = int(np.log10(1/selected_ratios.max())-.4)
num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.autohide = True
p.xaxis.bounds = (-.5,len(selected_ratios)-.5)
p.xaxis.ticker = list(range(len(selected_ratios)))
p.xaxis.major_label_overrides = dict(zip(range(len(selected_ratios)), list(selected_ratios.index)))
p.xaxis.major_label_orientation = 3.14/2
return p
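# Hedged usage sketch: `selected_ratios` is assumed to be a pd.Series indexed by
# ETF ticker, e.g. the average quoted spread in the chosen trading window divided
# by the fund's expense ratio, computed elsewhere before calling
# make_relative_fee_amount(selected_ratios, t_new_text='(9:30-9:45)').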
def get_quoted_spread_change(selected_etfs, selected_dates, t_old, t_new, quoted_spread):
"""
Get the relative change in average quoted spread between the two time windows.
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to obtain averages of. In format YYYY-MM-DD.
t_new : tuple of timestamps
Starting and ending timestamp of the new trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
Returns
-------
pd.Series
The relative change in average quoted spread between the two time windows.
"""
df = get_averages(quoted_spread, selected_dates, selected_etfs)
old_quotes = df[(df.index > t_old[0]) & (df.index < t_old[1])].mean(0)
new_quotes = df[(df.index > t_new[0]) & (df.index < t_new[1])].mean(0)
return (new_quotes / old_quotes).sort_values(ascending=False)
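# --- Added usage sketch (not part of the original app) ---
# Shows how the helper above is intended to be called. The ticker, date and
# window bounds below are hypothetical placeholders, not values from the data.
def _demo_quoted_spread_change(quoted_spread):
    t_old = (pd.Timestamp('2021-01-01 10:00'), pd.Timestamp('2021-01-01 10:15'))
    t_new = (pd.Timestamp('2021-01-01 09:30'), pd.Timestamp('2021-01-01 09:45'))
    # Returns a Series of new/old average quoted spreads, sorted descending.
    return get_quoted_spread_change(['SPY'], ['2020-12-01'], t_old, t_new, quoted_spread)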
def create_metrics(fractional_increase, nwide=4, container=st, max_rows=2):
"""
Print information about fractional change in quoted spreads in metric form
Parameters
----------
fractional_increase : pd.Series
Data of the increase in fees between two windows.
nwide : int, optional
Number of metrics to print side-by-side. The default is 4.
container : streamlit object, optional
Object to display metrics. The default is st.
max_rows : int, optional
Max number of rows to present data for. The default is 2.
Returns
-------
None.
"""
metrics = {}
rows = 0
for etf, val in dict(fractional_increase).items():
if len(metrics) == nwide:
with container:
metric_row(metrics)
metrics = {}
rows += 1
if rows == max_rows:
break
metrics[etf] = '{:.0f}%'.format((val-1)*100)
if len(metrics) > 0:
with container:
metric_row(metrics)
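# --- Added usage sketch (not part of the original app) ---
# create_metrics expects a Series of new/old ratios, e.g. the output of
# get_quoted_spread_change above; each value is rendered as a percentage change.
# The ratios below are made up, and the call assumes the app's module-level
# streamlit / metric_row imports.
def _demo_create_metrics():
    ratios = pd.Series({'ETF_A': 1.12, 'ETF_B': 0.95, 'ETF_C': 1.30})
    create_metrics(ratios, nwide=3, max_rows=1)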
st.write("# Bid-Ask spreads. Does time of day matter?")
st.write("#### By <NAME>")
st.write('first published March 10, 2021')
intro = st.beta_expander("Introduction")
data_selection = st.beta_expander("Data selection")
results = st.beta_expander("Results")
conclusion = st.beta_expander("Conclusion")
methods = st.beta_expander("Methods")
disclaimer = st.beta_expander("Disclaimer")
quoted_spread = pd.read_pickle('data/quoted_spread.pkl')
# remove outliers that impact average
del quoted_spread[('2020-12-16', 'SPCX')] # high value on second day of trading
del quoted_spread[('2020-03-12', 'ESGU')] # short high value on during large uncertainty
del quoted_spread[('2020-03-17', 'DRIV')] # short high value on during large uncertainty
del quoted_spread[('2020-02-03', 'EAGG')] # short high value on during large uncertainty
all_dates = list(quoted_spread.columns.levels[0])
all_dates.sort()
all_etfs = list(quoted_spread.columns.levels[1])
etf_data = pd.read_csv('etf.csv', index_col='Symbol')
etf_data = etf_data[etf_data['for_data'] == True]
start, end = data_selection.select_slider('Dates to analyze', all_dates, (all_dates[0], all_dates[-1]))
selected_dates = all_dates[all_dates.index(start):all_dates.index(end)]
method_choose_etfs = data_selection.multiselect('Methods for selecting ETFs',
['By volume traded', 'By market cap', 'Only ESG ETFs', 'choose specific ETFs'], ['choose specific ETFs'])
selected_etfs = display_method_to_choose_etfs(method_choose_etfs, all_etfs,etf_data,sl_obj=data_selection)
left_column, right_column = data_selection.beta_columns(2)
t_old = right_column.slider('Old trading window timing',
min_value=pd.Timestamp('2021-01-01 9:30').to_pydatetime(),
max_value=pd.Timestamp('2021-01-01 16:00').to_pydatetime(),
value=(pd.Timestamp('2021-01-01 10:00').to_pydatetime(), pd.Timestamp('2021-01-01 10:15').to_pydatetime()),
step=pd.Timedelta(minutes=5).to_pytimedelta(),
format='H:mm'
)
t_new = left_column.slider('New trading window timing',
min_value=pd.Timestamp('2021-01-01 9:30').to_pydatetime(),
max_value=pd.Timestamp('2021-01-01 16:00').to_pydatetime(),
                           value=(pd.Timestamp('2021-01-01 9:30').to_pydatetime(), pd.Timestamp('2021-01-01 9:45').to_pydatetime()),
                           step=pd.Timedelta(minutes=5).to_pytimedelta(),
                           format='H:mm'
                           )
#!/usr/bin/env python
##Run this file in terminal. The command is: python3 common_variations.py trait1.txt trait2.txt
#import the data
import sys, os
import pandas as pd
inFile1=sys.argv[1]
inFile2=sys.argv[2]
inFile1t=os.path.splitext(inFile1)[1]
inFile2t=os.path.splitext(inFile2)[1]
outFile1 = os.path.splitext(inFile1)[0]
outFile2 = os.path.splitext(inFile2)[0]
#saving the excels as pandas tables
if inFile1t=='.xls' or inFile1t=='.xlsx':
tb1 = pd.read_excel(inFile1)
if inFile1t=='.csv':
tb1 = pd.read_csv(inFile1)
if inFile2t=='.xls' or inFile2t=='.xlsx':
tb2 = pd.read_excel(inFile2)
if inFile2t=='.csv':
    tb2 = pd.read_csv(inFile2)
import datetime
import pandas as pd
from typing import List
from config import Config
from translation import Translate
from cachetools import cached, TTLCache
cache = TTLCache(maxsize=10, ttl=60)
@cached(cache)
class Data:
data = None
aggregated_data = None
total_regions_data = None
regions_data = None
regions_list = None
t = None
source = None
source_config = None
n_days = 0
features = []
extended_features = []
latest_update = "1970-01-01 00:00:00"
def __init__(self, source: str, lang: str = "English"):
self.source = source
self.source_config = Config().sources.get(self.source)
self.t = Translate(lang)
self.data = getattr(self, "get_data_" + self.source,
pd.DataFrame)()
if self.data.empty:
return
self.normalize_date()
self.set_latest_update()
self.data = self.calculate_days_passed(self.data)
self.set_features(self.source_config['no_feature_columns'])
# Remove the time and just focus on the date
self.aggregate_data()
self.set_total_regions_data()
self.set_regions_data()
self.set_regions_list()
def get_data_ita(self):
        _data = pd.read_csv(self.source_config['csv'][0])
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data.head(10)
#Code starts here
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import logging
import itertools
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
#modify to work with kfold
#def smoteAdataset(Xig, yig, test_size=0.2, random_state=0):
#def smoteAdataset(Xig_train, yig_train, Xig_test, yig_test):
# sm=SMOTE(random_state=2)
# Xig_train_res, yig_train_res = sm.fit_sample(Xig_train, yig_train.ravel())
# return Xig_train_res, pd.Series(yig_train_res), Xig_test, pd.Series(yig_test)
def create_logger():
logger_ = logging.getLogger('main')
logger_.setLevel(logging.DEBUG)
fh = logging.FileHandler('simple_lightgbm.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s]%(asctime)s:%(name)s:%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger_.addHandler(fh)
logger_.addHandler(ch)
def get_logger():
return logging.getLogger('main')
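# --- Added usage sketch (not part of the original script) ---
# create_logger() is meant to be called once at start-up; any later code can then
# fetch the same configured logger (file + console handlers) via get_logger().
def _demo_logging():
    create_logger()
    get_logger().info('logger configured: writing to simple_lightgbm.log and stdout')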
def lgb_multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes), order='F')
y_ohe = pd.get_dummies(y_true)
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
nb_pos = y_ohe.sum(axis=0).values.astype(float)
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False
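# --- Added note (not part of the original script) ---
# This variant is the LightGBM-compatible wrapper: it is passed to
# LGBMClassifier.fit(..., eval_metric=lgb_multi_weighted_logloss) in
# train_classifiers() below. LightGBM hands the class probabilities to the
# callback as one flat array, which is why it is reshaped with order='F'
# before computing the same weighted log loss as multi_weighted_logloss(),
# defined next.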
def multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds
y_ohe = pd.get_dummies(y_true)
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
nb_pos = y_ohe.sum(axis=0).values.astype(float)
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return loss
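# --- Added sanity-check sketch (not part of the original script) ---
# Illustrates the expected shapes: y_true holds raw class labels, y_preds holds
# one probability column per class in sorted-label order. Values are synthetic.
def _demo_multi_weighted_logloss():
    classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
    y_true = pd.Series(classes * 10)  # every class present, 140 rows
    y_preds = np.random.dirichlet(np.ones(len(classes)), size=len(y_true))  # rows sum to 1
    return multi_weighted_logloss(y_true, y_preds)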
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
def predict_chunk(df_, clfs_, meta_, features, train_mean):
df_, aux_df_ = preprocess_ts_df(df_)
auxs = make_features(df_, aux_df_)
    aggs = get_aggregations()
new_columns = get_new_columns(aggs)
agg_ = df_.groupby('object_id').agg(aggs)
agg_.columns = new_columns
agg_ = add_features_to_agg(df=agg_)
full_test = agg_.reset_index().merge(
right=meta_,
how='left',
on='object_id'
)
for aux in auxs:
full_test = pd.merge(full_test, aux, on='object_id', how='left')
full_test = postprocess_df(full_test)
#full_test = full_test.fillna(train_mean)
preds_ = None
for clf in clfs_:
if preds_ is None:
preds_ = clf.predict_proba(full_test[features]) / len(clfs_)
else:
preds_ += clf.predict_proba(full_test[features]) / len(clfs_)
preds_99 = np.ones(preds_.shape[0])
for i in range(preds_.shape[1]):
preds_99 *= (1 - preds_[:, i])
preds_df_ = pd.DataFrame(preds_, columns=['class_' + str(s) for s in clfs_[0].classes_])
preds_df_['object_id'] = full_test['object_id']
preds_df_['class_99'] = 0.14 * preds_99 / np.mean(preds_99)
print(preds_df_['class_99'].mean())
del agg_, full_test, preds_
gc.collect()
return preds_df_
def save_importances(importances_):
mean_gain = importances_[['gain', 'feature']].groupby('feature').mean()
importances_['mean_gain'] = importances_['feature'].map(mean_gain['gain'])
plt.figure(figsize=(8, 12))
sns.barplot(x='gain', y='feature', data=importances_.sort_values('mean_gain', ascending=False))
plt.tight_layout()
plt.savefig('importances.png')
def train_classifiers(full_train=None, y=None):
folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=123)
clfs = []
importances = pd.DataFrame()
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.03,
'subsample': .9,
'colsample_bytree': .6,
'reg_alpha': .01,
'reg_lambda': .01,
'min_split_gain': 0.02,
'min_child_weight': 5,
'n_estimators': 10000,
'silent': -1,
'verbose': -1,
'max_depth': 3,
'seed': 159
}
oof_preds = np.zeros((len(full_train), np.unique(y).shape[0]))
full_ids = np.zeros(len(full_train))
w = y.value_counts()
ori_weights = {i : np.sum(w) / w[i] for i in w.index}
weights = {i : np.sum(w) / w[i] for i in w.index}
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
for value in classes:
weights[value] = weights[value] * class_weight[value]
for fold_, (trn_, val_) in enumerate(folds.split(y, y)):
lgb_params['seed'] += fold_
trn_x, trn_y = full_train.iloc[trn_], y.iloc[trn_]
val_x, val_y = full_train.iloc[val_], y.iloc[val_]
full_ids[val_] = val_x['object_id']
del val_x['object_id'], trn_x['object_id']
# trn_xa, trn_y, val_xa, val_y=smoteAdataset(trn_x.values, trn_y.values, val_x.values, val_y.values)
# trn_x=pd.DataFrame(data=trn_xa, columns=trn_x.columns)
# val_x=pd.DataFrame(data=val_xa, columns=val_x.columns)
clf = lgb.LGBMClassifier(**lgb_params)
clf.fit(
trn_x, trn_y,
eval_set=[(trn_x, trn_y), (val_x, val_y)],
eval_metric=lgb_multi_weighted_logloss,
verbose=100,
early_stopping_rounds=50,
sample_weight=trn_y.map(weights)
)
oof_preds[val_, :] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)
get_logger().info(multi_weighted_logloss(val_y, clf.predict_proba(val_x, num_iteration=clf.best_iteration_)))
imp_df = pd.DataFrame()
imp_df['feature'] = trn_x.columns
imp_df['gain'] = clf.feature_importances_
imp_df['fold'] = fold_ + 1
importances = pd.concat([importances, imp_df], axis=0, sort=False)
clfs.append(clf)
get_logger().info('MULTI WEIGHTED LOG LOSS : %.5f ' % multi_weighted_logloss(y_true=y, y_preds=oof_preds))
preds_df_ = pd.DataFrame(oof_preds, columns=['class_' + str(s) for s in clfs[0].classes_])
preds_df_['object_id'] = full_ids
print(preds_df_.head())
preds_df_.to_csv("oof_predictions.csv", index=False)
unique_y = np.unique(y)
class_map = dict()
for i,val in enumerate(unique_y):
class_map[val] = i
y_map = np.zeros((y.shape[0],))
y_map = np.array([class_map[val] for val in y])
# Compute confusion matrix
from sklearn.metrics import confusion_matrix
cnf_matrix = confusion_matrix(y_map, np.argmax(oof_preds,axis=-1))
np.set_printoptions(precision=2)
sample_sub = pd.read_csv('../input/sample_submission.csv')
class_names = list(sample_sub.columns[1:-1])
del sample_sub;gc.collect()
# Plot non-normalized confusion matrix
plt.figure(figsize=(12,12))
foo = plot_confusion_matrix(cnf_matrix, classes=class_names,normalize=True,
title='Confusion matrix')
return clfs, importances
def get_aggregations():
return {
'flux': ['min', 'max', 'mean', 'median', 'std', 'skew'],
'flux_err': ['min', 'max', 'mean', 'median', 'std', 'skew'],
'detected': ['sum'],
'flux_ratio_sq': ['sum','skew'],
'flux_by_flux_ratio_sq': ['sum','skew'],
}
def get_new_columns(aggs):
return [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
def add_features_to_agg(df):
df['flux_diff'] = df['flux_max'] - df['flux_min']
df['flux_dif2'] = (df['flux_max'] - df['flux_min']) / df['flux_mean']
df['flux_w_mean'] = df['flux_by_flux_ratio_sq_sum'] / df['flux_ratio_sq_sum']
df['flux_dif3'] = (df['flux_max'] - df['flux_min']) / df['flux_w_mean']
return df
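# --- Added usage sketch (not part of the original script) ---
# The three helpers above are meant to be chained: aggregate per object, flatten
# the MultiIndex column names, then derive the extra flux features. This mirrors
# the calls in predict_chunk(); it assumes df_ts already carries the
# flux_ratio_sq / flux_by_flux_ratio_sq columns added during preprocessing.
def _demo_aggregate(df_ts):
    aggs = get_aggregations()
    agg_ = df_ts.groupby('object_id').agg(aggs)
    agg_.columns = get_new_columns(aggs)
    return add_features_to_agg(agg_)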
def agg_per_obj_passband(df, col, agg):
aux = df[['object_id','passband']+[col]]
aggs = {col: [agg]}
aux = df.groupby(['object_id','passband']).agg(aggs).reset_index()
new_df = pd.DataFrame()
new_df['object_id'] = aux['object_id'].unique()
for x in range(0,6):
new_aux = aux[aux['passband'] == x]
del new_aux['passband']
new_aux.columns = ['object_id',col+'_'+agg+'_passband_'+str(x)]
new_df = pd.merge(new_df, new_aux, on='object_id', how='left')
new_df = new_df.fillna(0)
return new_df
def mjd_diff_detected(df, col):
mjd_max = df.groupby('object_id')[col].max().reset_index()
mjd_min = df.groupby('object_id')[col].min().reset_index()
mjd_max.columns = ['object_id',col+'_max']
mjd_min.columns = ['object_id',col+'_min']
df = pd.merge(df, mjd_max, on='object_id', how='left')
df = pd.merge(df, mjd_min, on='object_id', how='left')
df[col+'_diff_detected'] = df[col+'_max'] - df[col+'_min']
aux_df = df.groupby('object_id')[col+'_diff_detected'].max().reset_index()
return aux_df
def mjd_diff2_detected(df, col):
mjd_max = df.groupby('object_id')[col].max().reset_index()
mjd_min = df.groupby('object_id')[col].min().reset_index()
mjd_mean = df.groupby('object_id')[col].mean().reset_index()
mjd_max.columns = ['object_id',col+'_max']
mjd_min.columns = ['object_id',col+'_min']
mjd_mean.columns = ['object_id',col+'_mean']
df = pd.merge(df, mjd_max, on='object_id', how='left')
df = pd.merge(df, mjd_min, on='object_id', how='left')
df = pd.merge(df, mjd_mean, on='object_id', how='left')
df[col+'_diff2_detected'] = (df[col+'_max'] - df[col+'_min']) / df[col+'_mean']
aux_df = df.groupby('object_id')[col+'_diff2_detected'].max().reset_index()
return aux_df
def mjd_diff_detected_passband(df, col):
mjd_max = df.groupby(['object_id','passband'])[col].max().reset_index()
mjd_min = df.groupby(['object_id','passband'])[col].min().reset_index()
mjd_max.columns = ['object_id','passband',col+'_max']
mjd_min.columns = ['object_id','passband',col+'_min']
df = pd.merge(df, mjd_max, on=['object_id','passband'], how='left')
df = pd.merge(df, mjd_min, on=['object_id','passband'], how='left')
df[col+'_diff'] = df[col+'_max'] - df[col+'_min']
aux = df.groupby(['object_id','passband'])[col+'_diff'].max().reset_index()
new_df = pd.DataFrame()
new_df['object_id'] = aux['object_id'].unique()
for x in range(0,6):
new_aux = aux[aux['passband'] == x]
del new_aux['passband']
new_aux.columns = ['object_id',col+'_detected_passband_'+str(x)]
new_df = pd.merge(new_df, new_aux, on='object_id', how='left')
new_df = new_df.fillna(0)
return new_df
def flux_around_max_passband(df, tam_window):
max_flux = df.groupby('object_id')['flux'].max().reset_index()
max_flux.columns = ['object_id','max_flux_obj']
df = pd.merge(df, max_flux, on='object_id', how='left')
df['RWMF'] = 0
    df.loc[df['flux'] == df['max_flux_obj'], 'RWMF'] = 1
df = df.sort_values(['object_id','mjd'])
max_mjd = df[df['RWMF'] == 1]
max_mjd = max_mjd[['object_id','mjd']]
max_mjd.columns = ['object_id','mjd_where_flux_max']
df = pd.merge(df, max_mjd, on='object_id', how='left')
df['time_walk'] = df['mjd_where_flux_max'] + tam_window
df['mjd_where_flux_max'] = df['mjd_where_flux_max'] - tam_window
aux_df = df[(df['mjd'] > df['mjd_where_flux_max'])&(df['mjd'] < df['time_walk'])]
aux_df = aux_df[['object_id','passband','flux']]
rtn_df = aux_df.groupby(['object_id','passband'])['flux'].mean().reset_index()
    q_df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 21:55:57 2020
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import pickle as pckl
import hgc
import os
from hgc import ner
from hgc import io
import tests
# from googletrans import Translator
def test_ner():
''' to test whether the function ner can generate correctly mapped features and units '''
# WD = os.path.join(os.path.dirname(os.path.realpath(__file__))) # set work directory to current module
WD = Path(tests.__file__).parent
# @Xin/ MartinK: Test if all HGC features are included in hgc_io.default_features()
df_temp = pd.read_excel(WD / 'testfile1_io.xlsx')
feature_map, feature_unmapped, df_feature_map = hgc.ner.generate_feature_map(entity_orig=list(df_temp.iloc[2, slice(5, 999)].dropna()))
unit_map, unit_unmapped, df_unit_map = hgc.ner.generate_unit_map(entity_orig=list(df_temp.iloc[3, slice(5, 999)].dropna()))
assert feature_map['Acidity'] == 'ph'
assert feature_map['Electrical Conductivity'] == 'ec'
assert unit_map['mS/m'] == 'mS/m'
assert unit_map['μmol N/l'] == 'μmol/L N'
# Next version:
# # Add unit test of the "score" computed by the generate_untity_map function
def test_io_wide():
'''test wide-shaped file'''
WD = Path(tests.__file__).parent
# get feature_map and unit_map for testing
    df_temp = pd.read_excel(WD / 'testfile1_io.xlsx', sheet_name='wide')
###############################################
# Calculate the turnover moments from BLS Data
##############################################
"""
<NAME>
Script that calculates the moments to match
and that draws a lot of graphs using mainly data
from the BLS
"""
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from more_itertools import unique_everseen
import pylab
import matplotlib.dates as mdates
from math import exp, log
import math
from datetime import date, timedelta as td
import statsmodels.api as sm
import scipy
from tabulate import tabulate
import csv
import statsmodels.formula.api as smm #OLS
from arch import arch_model
from scipy import stats
import matplotlib.cm as cm
#################################
# PATHS
# change "path_main" if necessary
path = '/home/julien/Final-Project-Julien-Pascal/data' #path to the data
path_figure = '/home/julien/Final-Project-Julien-Pascal/figures/' #where to save the figures
path_table = '/home/julien/Final-Project-Julien-Pascal/tables/' #where to save the tables
os.chdir(path) #locate in the folder with the data
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
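# --- Added usage sketch (not part of the original script) ---
# Quick self-check on a synthetic smoothed series: result[0] is 1 by construction
# and later lags decay towards zero for a short moving average of white noise.
def _demo_autocorrelation():
    x = pd.Series(np.random.randn(200)).rolling(5).mean().dropna().values
    return estimated_autocorrelation(x)[:10]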
#################################################
# Select what date to begin with and when to stop
starting_year = 1951 #first quarter 1951. starting point for the HWI
starting_month = 1
starting_date = date(starting_year, starting_month, 1)
stop_year = 2015 #third quarter 2015
stop_month = 7
stop_date = date(stop_year, stop_month, 1)
df = pd.read_csv("LNS.csv")
import pandas as pd
import numpy as np
from datetime import datetime
def read_data(file_name, skiprows = 0, index_col = False):
df = pd.read_csv(file_name, skiprows = skiprows,error_bad_lines=False,index_col = index_col)
df = df[['bbr_x','bbr_y','fbr_x','fbr_y','fbl_x','fbl_y','bbl_x','bbl_y',
'Frame #','Timestamp','ID','direction','speed', 'width']]
rightToLeftDF = df.loc[df['direction'] == -1]
rightToLeftDF.columns = ['tr_x','tr_y','tl_x','tl_y','bl_x','bl_y','br_x','br_y',
'Frame #','Timestamp','ID','direction','speed', 'width']
leftToRightDF = df.loc[df['direction'] == 1]
leftToRightDF.columns = ['bl_x','bl_y','br_x','br_y','tr_x','tr_y','tl_x','tl_y',
'Frame #','Timestamp','ID','direction','speed','width']
leftToRightDF = leftToRightDF[['tr_x','tr_y','tl_x','tl_y','bl_x','bl_y','br_x','br_y',
'Frame #','Timestamp','ID','direction','speed','width']]
    df = pd.concat([rightToLeftDF, leftToRightDF])
    return df
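# --- Added usage sketch (not part of the original script) ---
# The file name below is a hypothetical placeholder; read_data() normalises both
# travel directions onto the same tr/tl/bl/br corner naming before concatenating.
def _demo_read_data():
    return read_data('tracking_output.csv', skiprows=0, index_col=False)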
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assert_index_equal(bf.columns, other.columns)
self.assert_index_equal(bf.index, other.index)
self.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assert_index_equal(bf.columns, self.frame.columns)
self.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
self.assertTrue(isinstance(right, Series))
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(right, expected)
# GH 9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
# TODO should reindex check_names?
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
        # neg indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3, 1, 2, 30], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, -31], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, 5], axis=1)
self.assertRaises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
        # neg indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[0][1]))
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[1]).all())
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
self.assertIn('foo', reindexed)
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
self.assertNotIn('foo', reindexed)
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
self.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
newFrame = self.frame.reindex_axis(cols, axis=1)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
        assert_frame_equal(result, expected)
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import plotly.express as px
from pathlib import Path
from functools import lru_cache
import statsmodels.formula.api as smf
from datetime import datetime
import pandasdmx as pdmx
plt.style.use(
"https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt"
)
@st.cache
def prep_gdp_output_codes():
hdf = pd.read_excel(Path("data", "uk_gdp_output_hierarchy.xlsx"), header=None)
hdf = hdf.dropna(how="all", axis=1)
for i in range(3):
hdf.iloc[i, :] = hdf.iloc[i, :].fillna(method="ffill")
hdf = hdf.T
hdf["total"] = hdf[3].str.contains("Total")
hdf = hdf.query("total==False")
hdf = hdf.drop("total", axis=1)
for col in range(5):
hdf[col] = hdf[col].str.lstrip().str.rstrip()
hdf = hdf.rename(columns={4: "section", 5: "code"})
return hdf
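# --- Added usage sketch (not part of the original app) ---
# The cleaned hierarchy keeps the numeric level columns plus 'section' and 'code';
# the blue-book codes in 'code' drive the ONS API calls further down.
def _demo_gdp_codes():
    hdf = prep_gdp_output_codes()
    return hdf[["section", "code"]].head()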
def get_uk_regional_gdp():
# current year
latest_year = datetime.now().year - 1
# Tell pdmx we want OECD data
oecd = pdmx.Request("OECD")
# Set out everything about the request in the format specified by the OECD API
data = oecd.data(
resource_id="REGION_ECONOM",
key="1+2.UKC.SNA_2008.GDP.REG+CURR_PR.ALL.2017+2018+2019+2020/all?",
).to_pandas()
# example that works:
"https://stats.oecd.org/restsdmx/sdmx.ashx/GetData/REGION_ECONOM/1+2.GBR+UKC+UKC11+UKC12.SNA_2008.GDP.REG+CURR_PR+USD_PPP+REAL_PR+REAL_PPP+PC+PC_CURR_PR+PC_USD_PPP+PC_REAL_PR+PC_REAL_PPP.ALL.2001+2002+2003+2004+2005+2006+2007+2008+2009+2010+2011+2012+2013+2014+2015+2016+2017+2018+2019+2020/all?"
df = pd.DataFrame(data).reset_index()
df.head()
@st.cache
def ons_blue_book_data(code):
data = grab_ONS_time_series_data("BB", code)
xf = pd.DataFrame(pd.json_normalize(data["years"]))
xf = xf[["year", "value"]]
xf["year"] = xf["year"].astype(int)
xf["value"] = xf["value"].astype(float)
xf["title"] = data["description"]["title"]
xf["code"] = code
xf = pd.DataFrame(xf.loc[xf["year"].argmax(), :]).T
return xf
@st.cache
@lru_cache(maxsize=32)
def ons_get_gdp_output_with_breakdown():
df = prep_gdp_output_codes()
xf = pd.DataFrame()
for code in df["code"].unique():
xf = pd.concat([xf, ons_blue_book_data(code)], axis=0)
df = pd.merge(df, xf, on=["code"], how="inner")
# for later treemap use, only use highest level name if hierachy has
# missing levels
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
# now, any nones with non-none children must be swapped
df.loc[(df[2].isnull()) & (~df[3].isnull()), [2, 3]] = df.loc[
(df[2].isnull()) & (~df[3].isnull()), [3, 2]
].values
df.loc[(df[0] == df[1]), [1]] = df.loc[(df[0] == df[1]), [2]].values
df.loc[(df[1] == df[2]), [2]] = df.loc[(df[1] == df[2]), [3]].values
# another round of this
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
df.loc[(df[3] == df[2]), [3]] = None
return df
@st.cache
def grab_ONS_time_series_data(dataset_id, timeseries_id):
"""
This function grabs specified time series from the ONS API.
"""
api_endpoint = "https://api.ons.gov.uk/"
api_params = {"dataset": dataset_id, "timeseries": timeseries_id}
url = (
api_endpoint
+ "/".join(
[x + "/" + y for x, y in zip(api_params.keys(), api_params.values())][::-1]
)
+ "/data"
)
return requests.get(url).json()
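# --- Added usage sketch (not part of the original app) ---
# The dataset / series pair below is only an illustrative guess ("QNA"/"ABMI",
# quarterly chained-volume GDP); any valid ONS dataset and timeseries id works.
def _demo_ons_call():
    data = grab_ONS_time_series_data("QNA", "ABMI")
    return data["description"]["title"]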
def ons_clean_qna_data(data):
if data["quarters"] != []:
        df = pd.DataFrame(pd.json_normalize(data["quarters"]))
# Preprocessing
import os, matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_rows', 50)
import numpy as np
import xgboost as xgb
import xgbfir
import pdb
import time
np.random.seed(1337)
def client_anaylsis():
"""
The idea here is to unify the client ID of several different customers to more broad categories.
"""
# clean duplicate spaces in client names
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client_df["NombreCliente"] = client_df["NombreCliente"].str.lower()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
special_list = ["^(yepas)\s.*", "^(oxxo)\s.*", "^(bodega\scomercial)\s.*", "^(bodega\saurrera)\s.*", "^(bodega)\s.*",
"^(woolwort|woolworth)\s.*", "^(zona\sexpress)\s.*",
"^(zacatecana)\s.*", "^(yza)\s.*",
"^(yanet)\s.*", "^(yak)\s.*",
"^(wings)\s.*", "^(wendy)\s.*", "^(walmart\ssuper)\s?.*", "^(waldos)\s.*",
"^(wal\smart)\s.*", "^(vulcanizadora)\s.*", "^(viveres\sy\sservicios)\s.*",
"^(vips)\s.*", "^(vinos\sy\slicores)\s.*", "^(tienda\ssuper\sprecio)\s.*",
"^(vinos\sy\sabarrotes)\s.*", "^(vinateria)\s.*", "^(video\sjuegos)\s.*", "^(universidad)\s.*",
"^(tiendas\stres\sb)\s.*", "^(toks)\s.*","^(tkt\ssix)\s.*",
"^(torteria)\s.*", "^(tortas)\s.*", "^(super\sbara)\s.*",
"^(tiendas\sde\ssuper\sprecio)\s.*", "^(ultramarinos)\s.*", "^(tortilleria)\s.*",
"^(tienda\sde\sservicio)\s.*", "^(super\sx)\s.*", "^(super\swillys)\s.*",
"^(super\ssanchez)\s.*", "^(super\sneto)\s.*", "^(super\skompras)\s.*",
"^(super\skiosco)\s.*", "^(super\sfarmacia)\s.*", "^(super\scarnes)\s.*",
"^(super\scarniceria)\s.*", "^(soriana)\s.*", "^(super\scenter)\s.*",
"^(solo\sun\sprecio)\s.*", "^(super\scity)\s.*", "^(super\sg)\s.*", "^(super\smercado)\s.*",
"^(sdn)\s.*", "^(sams\sclub)\s.*", "^(papeleria)\s.*", "^(multicinemas)\s.*",
"^(mz)\s.*", "^(motel)\s.*", "^(minisuper)\s.*", "^(mini\stienda)\s.*",
"^(mini\ssuper)\s.*", "^(mini\smarket)\s.*", "^(mini\sabarrotes)\s.*", "^(mi\sbodega)\s.*",
"^(merza|merzapack)\s.*", "^(mercado\ssoriana)\s.*", "^(mega\scomercial)\s.*",
"^(mc\sdonalds)\s.*", "^(mb)\s[^ex].*", "^(maquina\sfma)\s.*", "^(ley\sexpress)\s.*",
"^(lavamatica)\s.*", "^(kiosko)\s.*", "^(kesos\sy\skosas)\s.*", "^(issste)\s.*",
"^(hot\sdogs\sy\shamburguesas|)\s.*", "^(hamburguesas\sy\shot\sdogs)\s.*", "(hot\sdog)",
"^(hospital)\s.*", "^(hiper\ssoriana)\s.*", "^(super\sahorros)\s.*", "^(super\sabarrotes)\s.*",
"^(hambuerguesas|hamburguesas|hamburgesas)\s.*", "^(gran\sbodega)\s.*",
"^(gran\sd)\s.*", "^(go\smart)\s.*", "^(gasolinera)\s.*", "^(fundacion)\s.*",
"^(fruteria)\s.*", "^(frutas\sy\sverduras)\s.*", "^(frutas\sy\slegumbres)\s.*",
"^(frutas\sy\sabarrotes)\s.*", "^(fma)\s.*", "^(fiesta\sinn)\s.*", "^(ferreteria)\s.*",
"^(farmacon)\s.*", "^(farmacias)\s.*", "^(farmacia\syza)\s.*",
"^(farmacia\smoderna)\s.*", "^(farmacia\slopez)\s.*",
"^(farmacia\sissste)\s.*", "^(farmacia\sisseg)\s.*", "^(farmacia\sguadalajara)\s.*",
"^(farmacia\sesquivar)\s.*", "^(farmacia\scalderon)\s.*", "^(farmacia\sbenavides)\s.*",
"^(farmacia\sabc)\s.*", "^(farmacia)\s.*", "^(farm\sguadalajara)\s.*",
"^(facultad\sde)\s.*", "^(f\sgdl)\s.*", "^(expendio)\s.*", "^(expendio\sde\span)\s.*",
"^(expendio\sde\shuevo)\s.*", "^(expendio\sbimbo)\s.*", "^(expendedoras\sautomaticas)\s.*",
"^(estic)\s.*", "^(estancia\sinfantil)\s.*", "^(estacionamiento)\s.*", "^(estanquillo)\s.*",
"^(estacion\sde\sservicio)\s.*", "^(establecimientos?)\s.*",
"^(escuela\suniversidad|esc\suniversidad)\s.*", "^(escuela\stelesecundaria|esc\stelesecundaria)\s.*",
"^(escuela\stecnica|esc\stecnica)\s.*",
"^(escuela\ssuperior|esc\ssuperior)\s.*", "^(escuela\ssecundaria\stecnica|esc\ssecundaria\stecnica)\s.*",
"^(escuela\ssecundaria\sgeneral|esc\ssecundaria\sgeneral)\s.*",
"^(escuela\ssecundaria\sfederal|esc\ssecundaria\sfederal)\s.*",
"^(escuela\ssecundaria|esc\ssecundaria)\s.*", "^(escuela\sprimaria|esc\sprimaria)\s.*",
"^(escuela\spreparatoria|esc\spreparatoria)\s.*", "^(escuela\snormal|esc\snormal)\s.*",
"^(escuela\sinstituto|esc\sinstituto)\s.*", "^(esc\sprepa|esc\sprep)\s.*",
"^(escuela\scolegio|esc\scolegio)\s.*", "^(escuela|esc)\s.*", "^(dunosusa)\s.*",
"^(ferreteria)\s.*", "^(dulces)\s.*", "^(dulceria)\s.*", "^(dulce)\s.*", "^(distribuidora)\s.*",
"^(diconsa)\s.*", "^(deposito)\s.*", "^(del\srio)\s.*", "^(cyber)\s.*", "^(cremeria)\s.*",
"^(cosina\seconomica)\s.*", "^(copy).*", "^(consumo|consumos)\s.*","^(conalep)\s.*",
"^(comercializadora)\s.*", "^(comercial\ssuper\salianza)\s.*",
"^(comercial\smexicana)\s.*", "^(comedor)\s.*", "^(colegio\sde\sbachilleres)\s.*",
"^(colegio)\s.*", "^(coffe).*", "^(cocteleria|cockteleria)\s.*", "^(cocina\seconomica)\s.*",
"^(cocina)\s.*", "^(cobaev)\s.*", "^(cobaes)\s.*", "^(cobaeh)\s.*", "^(cobach)\s.*",
"^(club\sde\sgolf)\s.*", "^(club\scampestre)\s.*", "^(city\sclub)\s.*", "^(circulo\sk)\s.*",
"^(cinepolis)\s.*", "^(cinemex)\s.*", "^(cinemas)\s.*", "^(cinemark)\s.*", "^(ciber)\s.*",
"^(church|churchs)\s.*", "^(chilis)\s.*", "^(chiles\sy\ssemillas)\s.*", "^(chiles\ssecos)\s.*",
"^(chedraui)\s.*", "^(cetis)\s.*", "^(cervefrio)\s.*", "^(cervefiesta)\s.*",
"^(cerveceria)\s.*", "^(cervecentro)\s.*", "^(centro\sescolar)\s.*", "^(centro\seducativo)\s.*",
"^(centro\sde\sestudios)\s.*", "^(centro\scomercial)\s.*", "^(central\sde\sautobuses)\s.*",
"^(cecytem)\s.*", "^(cecytec)\s.*", "^(cecyte)\s.*", "^(cbtis)\s.*", "^(cbta)\s.*", "^(cbt)\s.*",
"^(caseta\stelefonica)\s.*", "^(caseta)\s.*", "^(casa\sley)\s.*", "^(casa\shernandez)\s.*",
"^(cartonero\scentral)\s.*", "^(carniceria)\s.*", "^(carne\smart)\s.*", "^(calimax)\s.*",
"^(cajero)\s.*", "^(cafeteria)\s.*", "^(cafe)\s.*", "^(burritos)\s.*",
"^(burguer\sking|burger\sking)\s.*", "^(bip)\s.*", "^(bimbo\sexpendio)\s.*",
"^(burguer|burger)\s.*", "^(ba.os)\s.*", "^(bae)\s.*", "^(bachilleres)\s.*", "^(bachillerato)\s.*",
"^(autosercivio|auto\sservicio)\s.*", "^(autolavado|auto\slavado)\s.*",
"^(autobuses\sla\spiedad|autobuses\sde\sla\piedad)\s.*", "^(arrachera)\s.*",
"^(alsuper\sstore)\s.*", "^(alsuper)\s.*", "^(academia)\s.*", "^(abts)\s.*",
"^(abarrotera\slagunitas)\s.*", "^(abarrotera)\s.*", "^(abarrotes\sy\svinos)\s.*",
"^(abarrotes\sy\sverduras)\s.*", "^(abarrotes\sy\ssemillas)\s.*",
"^(abarrotes\sy\spapeleria)\s.*", "^(abarrotes\sy\snovedades)\s.*", "^(abarrotes\sy\sfruteria)\s.*",
"^(abarrotes\sy\sdeposito)\s.*", "^(abarrotes\sy\scremeria)\s.*", "^(abarrotes\sy\scarniceria)\s.*",
"^(abarrotes\svinos\sy\slicores)\s.*", "^(abarrote|abarrotes|abarotes|abarr|aba|ab)\s.*",
"^(7\seleven)\s.*", "^(7\s24)\s.*"]
client_df["NombreCliente2"] = client_df["NombreCliente"]
for var in special_list:
client_df[var] = client_df["NombreCliente"].str.extract(var, expand=False).str.upper()
replace = client_df.loc[~client_df[var].isnull(), var]
client_df.loc[~client_df[var].isnull(),"NombreCliente2"] = replace
client_df.drop(var, axis=1, inplace=True)
client_df.drop("NombreCliente", axis=1, inplace=True)
client_df.to_csv("../data/cliente_tabla2.csv.gz", compression="gzip", index=False)
def client_anaylsis2():
"""
    The idea here is to unify the client IDs of several different customers into broader
    categories, using a different approach from the one above
"""
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
# clean duplicate spaces in client names
client_df["NombreCliente"] = client_df["NombreCliente"].str.upper()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
# --- Begin Filtering for specific terms
# Note that the order of filtering is significant.
    # For example:
    # The regex .*ERIA.* will assign "FRUTERIA" to 'Eatery' rather than 'Fresh Market'.
    # In other words, filters that are applied earlier take priority.
def filter_specific(vf2):
# Known Large Company / Special Group Types
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*REMISION.*', 'Consignment')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*WAL MART.*', '.*SAMS CLUB.*'], 'Walmart', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*OXXO.*', 'Oxxo Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*CONASUPO.*', 'Govt Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*BIMBO.*', 'Bimbo Store')
# General term search for a random assortment of words I picked from looking at
# their frequency of appearance in the data and common spanish words for these categories
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COLEG.*', '.*UNIV.*', '.*ESCU.*', '.*INSTI.*', \
'.*PREPAR.*'], 'School', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*PUESTO.*', 'Post')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*FARMA.*', '.*HOSPITAL.*', '.*CLINI.*', '.*BOTICA.*'],
'Hospital/Pharmacy', regex=True)
        vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*CAFE.*', '.*CREMERIA.*', '.*DULCERIA.*', \
                                                             '.*REST.*', '.*BURGER.*', '.*TACO.*', '.*TORTA.*', \
                                                             '.*TAQUER.*', '.*HOT DOG.*', '.*PIZZA.*', \
                                                             '.*COMEDOR.*', '.*ERIA.*', '.*BURGU.*'], 'Eatery',
                                                            regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*SUPER.*', 'Supermarket')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COMERCIAL.*', '.*BODEGA.*', '.*DEPOSITO.*', \
'.*ABARROTES.*', '.*MERCADO.*', '.*CAMBIO.*', \
'.*MARKET.*', '.*MART .*', '.*MINI .*', \
'.*PLAZA.*', '.*MISC.*', '.*ELEVEN.*', '.*EXP.*', \
'.*SNACK.*', '.*PAPELERIA.*', '.*CARNICERIA.*', \
'.*LOCAL.*', '.*COMODIN.*', '.*PROVIDENCIA.*'
], 'General Market/Mart' \
, regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*VERDU.*', '.*FRUT.*'], 'Fresh Market', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*HOTEL.*', '.*MOTEL.*', ".*CASA.*"], 'Hotel', regex=True)
filter_specific(client_df)
# --- Begin filtering for more general terms
    # The idea here is to look for names containing words (articles, prepositions, etc.)
    # that would not appear in a person's name.
    # i.e. "Individuals" should not contain any such particles or numbers in their names.
def filter_participle(vf2):
vf2['NombreCliente'] = vf2['NombreCliente'].replace([
'.*LA .*', '.*EL .*', '.*DE .*', '.*LOS .*', '.*DEL .*', '.*Y .*', '.*SAN .*', '.*SANTA .*', \
'.*AG .*', '.*LAS .*', '.*MI .*', '.*MA .*', '.*II.*', '.*[0-9]+.*' \
], 'Small Franchise', regex=True)
filter_participle(client_df)
# Any remaining entries should be "Individual" Named Clients, there are some outliers.
# More specific filters could be used in order to reduce the percentage of outliers in this final set.
def filter_remaining(vf2):
def function_word(data):
            # Labels assigned above (e.g. 'Eatery') are not fully upper-case, so only untouched all-caps names get relabeled
if (data.isupper()) and (data != "NO IDENTIFICADO"):
return 'Individual'
else:
return data
vf2['NombreCliente'] = vf2['NombreCliente'].map(function_word)
filter_remaining(client_df)
client_df.rename(columns={"NombreCliente": "client_name3"}, inplace=True)
client_df.to_csv("../data/cliente_tabla3.csv.gz", compression="gzip", index=False)
def preprocess(save=False):
start = time.time()
dtype_dict = {"Semana": np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8,
'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16,
'Demanda_uni_equil': np.uint32, "Venta_hoy": np.float32, "Venta_uni_hoy": np.uint32,
"Dev_uni_proxima": np.uint32, "Dev_proxima": np.float32}
train = pd.read_csv("../data/train.csv.zip", compression="zip", dtype=dtype_dict)
test = pd.read_csv("../data/test.csv.zip", compression="zip", dtype=dtype_dict)
# train = train.sample(100000)
# test = test.sample(100000)
# We calculate out-of-sample mean features from most of the training data and only train from the samples in week 9.
# Out-of-sample mean features for training are calculated from all weeks before week 9 and for the test set from
# all weeks including week 9
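    # Illustrative sketch only (not the original feature code, which is defined further below):
    # an out-of-sample mean feature is built by aggregating on the earlier weeks and joining
    # the result onto the week-9 rows, e.g.
    #   prod_mean = mean_dataframes["train"].groupby("Producto_ID")["Demanda_uni_equil"].mean()
    #   train = train.join(prod_mean.rename("prod_mean"), on="Producto_ID")
    # Column names above come from this script; the exact aggregations actually used appear later.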
mean_dataframes = {}
mean_dataframes["train"] = train[train["Semana"]<9].copy()
mean_dataframes["test"] = train.copy()
print("complete train obs: {}".format(len(train)))
print("train week 9 obs: {}".format(len(train[train["Semana"] == 9])))
train = train[train["Semana"] == 9]
# not used in later stages. Was used to find the right hyperparameters for XGBoost. After finding them and to
# obtain the best solution the evaluation data was incorporated into the training data and the hyperparameters
# were used "blindly"
# eval = train.iloc[int(len(train) * 0.75):, :].copy()
# print("eval obs: {}".format(len(eval)))
# mean_dataframes["eval"] = mean_dataframes["test"].iloc[:eval.index.min(), :].copy()
# train = train.iloc[:int(len(train) * 0.75), :]
# print("train obs: {}".format(len(train)))
# read data files and create new client ids
town = pd.read_csv("../data/town_state.csv.zip", compression="zip")
product = pd.read_csv("../data/producto_tabla.csv.zip", compression="zip")
client = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client2 = pd.read_csv("../data/cliente_tabla2.csv.gz")
client2.rename(columns={"NombreCliente2": "client_name2"}, inplace=True)
client3 = | pd.read_csv("../data/cliente_tabla3.csv.gz") | pandas.read_csv |
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--task', default='yelp', choices=['yelp'])
parser.add_argument('--mode', default='train', choices=['train', 'eval'])
parser.add_argument('--checkpoint-frequency', type=int, default=100)
parser.add_argument('--eval-frequency', type=int, default=10000)
parser.add_argument('--batch-size', type=int, default=30)
parser.add_argument("--device", default="/cpu:0")
parser.add_argument("--max-grad-norm", type=float, default=5.0)
parser.add_argument("--lr", type=float, default=0.001)
args = parser.parse_args()
import importlib
import os
import pickle
import random
import time
from collections import Counter, defaultdict
import numpy as np
import pandas as pd
import spacy
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from tqdm import tqdm
import ujson
from data_util import batch
task_name = args.task
task = importlib.import_module(task_name)
checkpoint_dir = os.path.join(task.train_dir, 'checkpoint')
tflog_dir = os.path.join(task.train_dir, 'tflog')
checkpoint_name = task_name + '-model'
checkpoint_dir = os.path.join(task.train_dir, 'checkpoints')
checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
# @TODO: move calculation into `task file`
trainset = task.read_trainset(epochs=1)
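# Inverse-frequency class weights: classes that occur less often than the average get a weight above 1.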
class_weights = pd.Series(Counter([l for _, l in trainset]))
class_weights = 1/(class_weights/class_weights.mean())
class_weights = class_weights.to_dict()
vocab = task.read_vocab()
labels = task.read_labels()
classes = max(labels.values())+1
vocab_size = task.vocab_size
labels_rev = {int(v): k for k, v in labels.items()}
vocab_rev = {int(v): k for k, v in vocab.items()}
def HAN_model_1(session, restore_only=False):
"""Hierarhical Attention Network"""
import tensorflow as tf
try:
from tensorflow.contrib.rnn import GRUCell, MultiRNNCell, DropoutWrapper
except ImportError:
MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell
GRUCell = tf.nn.rnn_cell.GRUCell
from bn_lstm import BNLSTMCell
from HAN_model import HANClassifierModel
is_training = tf.placeholder(dtype=tf.bool, name='is_training')
cell = BNLSTMCell(80, is_training) # h-h batchnorm LSTMCell
# cell = GRUCell(30)
cell = MultiRNNCell([cell]*5)
model = HANClassifierModel(
vocab_size=vocab_size,
embedding_size=200,
classes=classes,
word_cell=cell,
sentence_cell=cell,
word_output_size=100,
sentence_output_size=100,
device=args.device,
learning_rate=args.lr,
max_grad_norm=args.max_grad_norm,
dropout_keep_proba=0.5,
is_training=is_training,
)
saver = tf.train.Saver(tf.global_variables())
checkpoint = tf.train.get_checkpoint_state(checkpoint_dir)
if checkpoint:
print("Reading model parameters from %s" % checkpoint.model_checkpoint_path)
saver.restore(session, checkpoint.model_checkpoint_path)
elif restore_only:
raise FileNotFoundError("Cannot restore model")
else:
print("Created model with fresh parameters")
session.run(tf.global_variables_initializer())
# tf.get_default_graph().finalize()
return model, saver
model_fn = HAN_model_1
def decode(ex):
print('text: ' + '\n'.join([' '.join([vocab_rev.get(wid, '<?>') for wid in sent]) for sent in ex[0]]))
print('label: ', labels_rev[ex[1]])
print('data loaded')
def batch_iterator(dataset, batch_size, max_epochs):
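    # Yield (x, y) batches of exactly `batch_size` examples; leftover examples at the end of an
    # epoch are dropped because xb/yb are reset when the next epoch starts.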
for i in range(max_epochs):
xb = []
yb = []
for ex in dataset:
x, y = ex
xb.append(x)
yb.append(y)
if len(xb) == batch_size:
yield xb, yb
xb, yb = [], []
def ev(session, model, dataset):
predictions = []
labels = []
examples = []
for x, y in tqdm(batch_iterator(dataset, args.batch_size, 1)):
examples.extend(x)
labels.extend(y)
predictions.extend(session.run(model.prediction, model.get_feed_data(x, is_training=False)))
df = | pd.DataFrame({'predictions': predictions, 'labels': labels, 'examples': examples}) | pandas.DataFrame |
import json
import sys
import pprint
import pandas as pd
from .KeyWordsSearch import search_phrases
from .Preprocessing_tools import full_preprocess_text, prepare_files, open_text
from .constnats import key_phrase, key_meal, key_category, greeting_key, farewell_key, pay_key, \
additional_key, new_key, order_key, operator_id, client_id, vacancy_key, key_answer
from ..dirs import STAGES_FILE, DATA_NORMALIZED_SELLER_DIR, DATA_NORMALIZED_CLIENT_DIR
STAGE_NAMING = {'приветствие': 'Greeting', 'новинка': 'OfferOfNewProductsOrPromotions', 'заказ': 'Order',
'доп': 'AdditionalSales', 'оплата': 'Payment', 'прощание': 'CompletionSale'}
ADDITION_NAMING = {'соус': 'Sauces', 'напитк': 'Drinks', 'пив': 'Drinks', 'пепс кол': 'Drinks',
'десерт': 'Desserts', 'основн': 'Garnishes', 'гарнир': 'Garnishes', 'завтрак': 'Garnishes',
'салат': 'Garnishes', 'закуск': 'Snacks', 'букет и снекбокс': 'Snacks', 'проч': 'Etc',
'проч товар': 'Etc', 'игрушк': 'Etc',
'доп': 'Etc', 'холодн напитк': 'Drinks', 'горяч напитк': 'Drinks'
}
def format_result(final_stages_time, vacancy_response, additional_sell, additional_sell_response,
additional_sale_adequacy, missed_addition, missed_addition_list):
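    # Assemble the evaluation dict: vacancy mention/outcome, presence and duration of each script
    # stage, and per-category additional-sales results (proposed / successful / adequate / missed).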
result = {}
result['Vacancy'] = {'VacancyAvailability': int(len(final_stages_time[vacancy_key]) > 0),
'Result': 0}
if len(vacancy_response) > 0:
result['Vacancy']['Result'] = int(vacancy_response[1] == 'положительн')
final_stages_time.pop(vacancy_key)
result['Script'] = {STAGE_NAMING[x]: int(len(final_stages_time[x]) != 0) for x in final_stages_time.keys()}
for k in final_stages_time.keys():
if result['Script'][STAGE_NAMING[k]] != 0:
result['Script'][f'{STAGE_NAMING[k]}Time'] = final_stages_time[k][1] - final_stages_time[k][0]
else:
result['Script'][f'{STAGE_NAMING[k]}Time'] = 0.0
result['AdditionalSales'] = {}
for group in ADDITION_NAMING.keys():
result['AdditionalSales'][ADDITION_NAMING[group]] = {'Proposed': 0, 'Successful': 0, 'Adequate': 0, 'Missed': 0}
if len(additional_sell) > 0:
for group in additional_sell[1]:
result['AdditionalSales'][ADDITION_NAMING[group]]['Proposed'] = 1
if len(additional_sell_response) > 0 and result['AdditionalSales'][ADDITION_NAMING[group]]['Proposed']:
result['AdditionalSales'][ADDITION_NAMING[group]]['Successful'] = int(
additional_sell_response[1] == 'agree')
if len(additional_sale_adequacy) > 0 and result['AdditionalSales'][ADDITION_NAMING[group]]['Proposed']:
result['AdditionalSales'][ADDITION_NAMING[group]]['Adequate'] = int(
additional_sale_adequacy[0] == 'ok')
result['AdditionalSales'][ADDITION_NAMING[group]]['Missed'] = 0
else:
for group in ADDITION_NAMING.values():
result['AdditionalSales'][group]['Successful'] = 0
result['AdditionalSales'][group]['Adequate'] = 0
result['AdditionalSales'][group]['Missed'] = int(
any([ADDITION_NAMING[miss] == group for miss in missed_addition_list]))
return result
def parse_diarization_file(file_name):
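    # The diarization file starts with the operator name; every following line is
    # "start_seconds, end_seconds, speaker_id, text". Phrases are normalized and grouped per speaker.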
all_phrases = open_text(file_name).split('\n')
operator_name = all_phrases[0].strip()
res = {client_id: [], operator_id: []}
max_time = 0.0
for seconds_text in all_phrases[1:]:
if seconds_text == '':
continue
start_end_text = seconds_text.split(', ')
max_time = float(start_end_text[1])
res[int(start_end_text[2])].append([float(start_end_text[0]), max_time,
full_preprocess_text(start_end_text[3])])
return res, max_time
def search_stage(full_text, file_stage):
all_info = pd.read_excel(file_stage, engine='openpyxl')
key_words = list(all_info[key_phrase].dropna().values)
return search_phrases(full_text, key_words)
def update_stages_id(stages_id, files, full_text, key):
for file in files:
found_ids = search_stage(full_text, DATA_NORMALIZED_SELLER_DIR / file)
if len(found_ids) > 0:
stages_id[key] += found_ids
if len(stages_id[key]) > 0:
new_ids = list(stages_id[key][0])
for i in range(1, len(stages_id[key])):
new_ids[1] = stages_id[key][i][1]
stages_id[key] = new_ids
def check_missed_addition(file, order):
all_info = pd.read_excel(file, engine='openpyxl')
order_category = find_order_category(DATA_NORMALIZED_SELLER_DIR / 'category_menu.xlsx', order)
res = set()
for index, row in all_info.iterrows():
if row['Order_category'] == order_category[0]:
if type(row['Ok']) == str:
res.add(row['Ok'])
return res
def check_adequacy(file, order, additional):
all_info = pd.read_excel(file, engine='openpyxl')
order_category = find_order_category(DATA_NORMALIZED_SELLER_DIR / 'category_menu.xlsx', order)
additional_category = find_order_category(DATA_NORMALIZED_SELLER_DIR / 'category_menu.xlsx', additional)
ok = []
not_ok = []
for index, row in all_info.iterrows():
if row['Order_category'] == order_category[0]:
if type(row['Ok']) == str:
ok.append(row['Ok'])
if type(row['Not_ok']) == str:
not_ok.append(row['Not_ok'])
res = ['UNK'] * len(additional_category)
for i, cat in enumerate(additional_category):
if cat in ok:
res[i] = 'ok'
if cat in not_ok:
res[i] = 'not_ok'
return res
def find_order_category(file, order):
all_info = pd.read_excel(file, engine='openpyxl')
meal_cat = {}
for index, row in all_info.iterrows():
meal_cat[row['Key_phrase']] = row['Category']
ids = search_phrases(order, list(meal_cat.keys()), interfere_thresh=0)
res = []
for id in ids:
res.append(order[id[0]:id[1]].strip())
return [meal_cat[x] for x in res]
def find_additional_response(files, stage_text):
all_info = pd.DataFrame(columns=['Key_phrase', 'category'])
for file in files:
tmp = pd.read_excel(DATA_NORMALIZED_CLIENT_DIR / file, engine='openpyxl')
tmp['category'] = pd.Series([file.split('.')[0].split('_')[1]] * len(tmp))
all_info = | pd.concat([all_info, tmp], axis=0) | pandas.concat |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import re
from contextlib2 import ExitStack
import numpy as np
import pandas as pd
import sqlalchemy as sa
from toolz import first
from zipline.errors import AssetDBVersionError
from zipline.assets.asset_db_schema import (
generate_asset_db_metadata,
asset_db_table_names,
ASSET_DB_VERSION,
)
# Define a namedtuple for use with the load_data and _load_data methods
AssetData = namedtuple('AssetData', 'equities futures exchanges root_symbols')
SQLITE_MAX_VARIABLE_NUMBER = 999
# Default values for the equities DataFrame
_equities_defaults = {
'symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'auto_close_date': None,
'exchange': None,
}
# Default values for the futures DataFrame
_futures_defaults = {
'symbol': None,
'root_symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'exchange': None,
'notice_date': None,
'expiration_date': None,
'auto_close_date': None,
'tick_size': None,
'multiplier': 1,
}
# Default values for the exchanges DataFrame
_exchanges_defaults = {
'timezone': None,
}
# Default values for the root_symbols DataFrame
_root_symbols_defaults = {
'root_symbol_id': None,
'sector': None,
'description': None,
'exchange': None,
}
# Fuzzy symbol delimiters that may break up a company symbol and share class
_delimited_symbol_delimiter_regex = r'[./\-_]'
_delimited_symbol_default_triggers = frozenset({np.nan, None, ''})
def split_delimited_symbol(symbol):
"""
Takes in a symbol that may be delimited and splits it in to a company
symbol and share class symbol. Also returns the fuzzy symbol, which is the
symbol without any fuzzy characters at all.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
( str, str , str )
A tuple of ( company_symbol, share_class_symbol, fuzzy_symbol)
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return ('', '', '')
split_list = re.split(pattern=_delimited_symbol_delimiter_regex,
string=symbol,
maxsplit=1)
# Break the list up in to its two components, the company symbol and the
# share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
# Strip all fuzzy characters from the symbol to get the fuzzy symbol
fuzzy_symbol = re.sub(pattern=_delimited_symbol_delimiter_regex,
repl='',
string=symbol)
return (company_symbol, share_class_symbol, fuzzy_symbol)
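# Example (illustrative): split_delimited_symbol('BRK.B') -> ('BRK', 'B', 'BRKB')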
def _generate_output_dataframe(data_subset, defaults):
"""
Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are the default values to insert in the
DataFrame if no user data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
need = desired_cols - cols
# Combine the users supplied data with our required columns.
output = pd.concat(
(data_subset, pd.DataFrame(
{k: defaults[k] for k in need},
data_subset.index,
)),
axis=1,
copy=False
)
return output
def _dt_to_epoch_ns(dt_series):
"""Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64)
def check_version_info(version_table, expected_version):
"""
Checks for a version value in the version table.
Parameters
----------
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION.
"""
# Read the version out of the table
version_from_table = sa.select((version_table.c.version,)).scalar()
# A db without a version is considered v0
if version_from_table is None:
version_from_table = 0
# Raise an error if the versions do not match
if (version_from_table != expected_version):
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version)
def write_version_info(version_table, version_value):
"""
Inserts the version value in to the version table.
Parameters
----------
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write in to the database
"""
sa.insert(version_table, values={'version': version_value}).execute()
class _empty(object):
columns = ()
class AssetDBWriter(object):
"""Class used to write data to an assets db.
Parameters
----------
engine : Engine or str
An SQLAlchemy engine or path to a SQL database.
"""
DEFAULT_CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER
def __init__(self, engine):
if isinstance(engine, str):
engine = sa.create_engine('sqlite:///' + engine)
self.engine = engine
def write(self,
equities=None,
futures=None,
exchanges=None,
root_symbols=None,
chunk_size=DEFAULT_CHUNK_SIZE):
with self.engine.begin() as txn:
# Create SQL tables if they do not exist.
metadata = self.init_db(txn)
# Get the data to add to SQL.
data = self._load_data(
equities if equities is not None else pd.DataFrame(),
futures if futures is not None else pd.DataFrame(),
exchanges if exchanges is not None else pd.DataFrame(),
root_symbols if root_symbols is not None else | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from quetzal.analysis import on_demand
from tqdm import tqdm
def tp_summary(links, shared):
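    # For every transit line, mark the OD flows whose path uses at least one of the line's links,
    # weight them by volume, and derive per-line averages of transfers and "exclusivity"
    # (1 / number of lines used by the trip).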
links = links.copy()
links['index'] = links.index
line_link_dict = links.groupby('trip_id')['index'].agg(lambda s: set(s)).to_dict()
line_list = list(line_link_dict.keys())
link_set = set(links['index'])
df = shared.copy()
for key, value in line_link_dict.items():
df[key] = df['path'].apply(lambda p: bool(len(value.intersection(p)))) * df['volume_pt']
# walk
df['all_walk'] = df['path'].apply(lambda p: len(link_set.intersection(p)) == 0)
df['transfer'] = df.loc[:, line_list].astype(bool).T.sum()
df['exclusivity'] = 1 / df['transfer']
# we do not want to reach infinite exclusivity rates where no line is used
df.loc[df['transfer'] == 0, 'exclusivity'] = 0
def average_value(line, column):
try:
return np.average(df[column], weights=df[line])
except ZeroDivisionError: # Weights sum to zero, can't be normalized
return 0
# transfer
transfers = pd.Series(
[average_value(line, 'transfer') for line in line_list],
index=line_list
)
exclusivities = pd.Series(
[average_value(line, 'exclusivity') for line in line_list],
index=line_list
)
right = | pd.DataFrame({'transfer': transfers, 'exclusivity': exclusivities}) | pandas.DataFrame |
import os
import sys
import glob
import numpy as np
import pandas as pd
MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(MAIN_DIR)
from src.evaluation.multipleboardingpoints_eval import multiple_boarding_points
from src.misc.globals import *
EURO_PER_TON_OF_CO2 = 145 # from BVWP2030 Modulhandbuch (page 113)
EMISSION_CPG = 145 * 100 / 1000**2
ENERGY_EMISSIONS = 112 # g/kWh from https://www.swm.de/dam/swm/dokumente/geschaeftskunden/broschuere-strom-erdgas-gk.pdf
PV_G_CO2_KM = 130 # g/km from https://www.ris-muenchen.de/RII/RII/DOK/ANTRAG/2337762.pdf with 60:38 benzin vs diesel
def get_directory_dict(scenario_parameters):
"""
This function provides the correct paths to certain data according to the specified data directory structure.
:param scenario_parameters: simulation input (pandas series)
:return: dictionary with paths to the respective data directories
"""
study_name = scenario_parameters[G_STUDY_NAME]
scenario_name = scenario_parameters[G_SCENARIO_NAME]
network_name = scenario_parameters[G_NETWORK_NAME]
demand_name = scenario_parameters[G_DEMAND_NAME]
zone_name = scenario_parameters.get(G_ZONE_SYSTEM_NAME, None)
fc_type = scenario_parameters.get(G_FC_TYPE, None)
fc_t_res = scenario_parameters.get(G_FC_TR, None)
gtfs_name = scenario_parameters.get(G_GTFS_NAME, None)
infra_name = scenario_parameters.get(G_INFRA_NAME, None)
#
dirs = {}
dirs[G_DIR_MAIN] = MAIN_DIR # here is the difference compared to the function in FLeetsimulationBase.py
dirs[G_DIR_DATA] = os.path.join(dirs[G_DIR_MAIN], "data")
dirs[G_DIR_OUTPUT] = os.path.join(dirs[G_DIR_MAIN], "studies", study_name, "results", scenario_name)
dirs[G_DIR_NETWORK] = os.path.join(dirs[G_DIR_DATA], "networks", network_name)
dirs[G_DIR_VEH] = os.path.join(dirs[G_DIR_DATA], "vehicles")
dirs[G_DIR_FCTRL] = os.path.join(dirs[G_DIR_DATA], "fleetctrl")
dirs[G_DIR_DEMAND] = os.path.join(dirs[G_DIR_DATA], "demand", demand_name, "matched", network_name)
if zone_name is not None:
dirs[G_DIR_ZONES] = os.path.join(dirs[G_DIR_DATA], "zones", zone_name, network_name)
if fc_type is not None and fc_t_res is not None:
dirs[G_DIR_FC] = os.path.join(dirs[G_DIR_DATA], "demand", demand_name, "aggregated", zone_name, str(fc_t_res))
if gtfs_name is not None:
dirs[G_DIR_PT] = os.path.join(dirs[G_DIR_DATA], "pubtrans", gtfs_name)
if infra_name is not None:
dirs[G_DIR_INFRA] = os.path.join(dirs[G_DIR_DATA], "infra", infra_name, network_name)
return dirs
def read_op_output_file(output_dir, op_id, evaluation_start_time = None, evaluation_end_time = None):
""" this method reads the ouputfile for the operator and returns its dataframe
:param output_dir: directory of the scenario results
:param op_id: operator id to evaluate
:param evaluation_start_time: if given all entries starting before this time are discarded
:param evaluation_end_time: if given, all entries starting after this time are discarded
:return: output dataframe of specific operator
"""
op_df = pd.read_csv(os.path.join(output_dir, f"2-{int(op_id)}_op-stats.csv"))
if evaluation_start_time is not None:
op_df = op_df[op_df[G_VR_LEG_START_TIME] >= evaluation_start_time]
if evaluation_end_time is not None:
op_df = op_df[op_df[G_VR_LEG_START_TIME] < evaluation_end_time]
# test for correct datatypes
def convert_str(val):
if val != val:
return val
if type(val) == str:
return val
else:
return str(int(val))
test_convert = [G_VR_ALIGHTING_RID, G_VR_BOARDING_RID, G_VR_OB_RID]
for col in test_convert:
if op_df.dtypes[col] != str:
op_df[col] = op_df[col].apply(convert_str)
return op_df
def read_user_output_file(output_dir, evaluation_start_time = None, evaluation_end_time = None):
""" this method reads the ouputfile the users and returns its dataframe
:param output_dir: directory of the scenario results
:param op_id: operator id to evaluate
:param evaluation_start_time: if given all entries starting before this time are discarded
:param evaluation_end_time: if given, all entries starting after this time are discarded
:return: output dataframe of specific operator
"""
user_stats = pd.read_csv(os.path.join(output_dir, "1_user-stats.csv"))
if evaluation_start_time is not None:
user_stats = user_stats[user_stats[G_RQ_TIME] >= evaluation_start_time]
if evaluation_end_time is not None:
user_stats = user_stats[user_stats[G_RQ_TIME] < evaluation_end_time]
return user_stats
def decode_offer_str(offer_str):
""" create a dictionary from offer_str in outputfile """
offer_dict = {}
try:
offer_strs = offer_str.split("|")
except:
return {}
for offer_str in offer_strs:
x = offer_str.split(":")
op = int(x[0])
vals = ":".join(x[1:])
if len(vals) == 0:
continue
offer_dict[op] = {}
for offer_entries in vals.split(";"):
try:
offer_at, v2 = offer_entries.split(":")
except:
continue
try:
v2 = int(v2)
except:
try:
v2 = float(v2)
except:
pass
offer_dict[op][offer_at] = v2
return offer_dict
def create_vehicle_type_db(vehicle_data_dir):
list_veh_data_f = glob.glob(f"{vehicle_data_dir}/*csv")
veh_type_db = {} # veh_type -> veh_type_data
for f in list_veh_data_f:
veh_type_name = os.path.basename(f)[:-4]
veh_type_data = | pd.read_csv(f, index_col=0, squeeze=True) | pandas.read_csv |
# -*- coding: utf-8 -*-
#%% Load NumPy
import numpy as np
# Load SciPy's stats module
import scipy.stats as st
# Load SciPy's optimize module
import scipy.optimize as opt
# Load SciPy's linalg module
import scipy.linalg as la
# Load Pandas
import pandas as pd
# Load Matplotlib's pyplot module
import matplotlib.pyplot as plt
# Japanese font settings
from matplotlib.font_manager import FontProperties
import sys
if sys.platform.startswith('win'):
FontPath = 'C:\\Windows\\Fonts\\meiryo.ttc'
elif sys.platform.startswith('darwin'):
FontPath = '/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc'
elif sys.platform.startswith('linux'):
FontPath = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'
else:
    print('This Python code does not support the operating system you are running.')
sys.exit()
jpfont = FontProperties(fname=FontPath)
#%% Bayesian inference for the regression coefficients and the error variance
# Compute the HPD interval of an inverse-gamma distribution
def invgamma_hpdi(hpdi0, alpha, beta, prob):
    """
    Inputs
        hpdi0: initial value for the HPD interval
        alpha: shape parameter of the inverse-gamma distribution
        beta:  scale parameter of the inverse-gamma distribution
        prob:  probability mass of the HPD interval (0 < prob < 1)
    Output
        HPD interval
    """
    def hpdi_conditions(v, a, b, p):
        """
        Inputs
            v: HPD interval
            a: shape parameter of the inverse-gamma distribution
            b: scale parameter of the inverse-gamma distribution
            p: probability mass of the HPD interval (0 < p < 1)
        Output
            values of the HPD-interval conditions (equal coverage and equal density at the endpoints)
        """
eq1 = st.invgamma.cdf(v[1], a, scale=b) \
- st.invgamma.cdf(v[0], a, scale=b) - p
eq2 = st.invgamma.pdf(v[1], a, scale=b) \
- st.invgamma.pdf(v[0], a, scale=b)
return np.hstack((eq1, eq2))
return opt.root(hpdi_conditions, hpdi0, args=(alpha, beta, prob)).x
# Compute posterior statistics for the regression coefficients and the error variance
def regression_stats(y, X, b0, A0, nu0, lam0, prob):
"""
入力
y: 被説明変数
X: 説明変数
b0: 回帰係数の条件付事前分布(多変量正規分布)の平均
A0: 回帰係数の条件付事前分布(多変量正規分布)の精度行列
nu0: 誤差項の分散の事前分布(逆ガンマ分布)の形状パラメータ
lam0: 誤差項の分散の事前分布(逆ガンマ分布)の尺度パラメータ
prob: 区間確率 (0 < prob < 1)
出力
results: 事後統計量のデータフレーム
b_star: 回帰係数の条件付事後分布(多変量正規分布)の平均
A_star: 回帰係数の条件付事後分布(多変量正規分布)の精度行列
nu_star: 誤差項の分散の事後分布(逆ガンマ分布)の形状パラメータ
lam_star: 誤差項の分散の事後分布(逆ガンマ分布)の尺度パラメータ
"""
XX = X.T.dot(X)
Xy = X.T.dot(y)
b_ols = la.solve(XX, Xy)
A_star = XX + A0
b_star = la.solve(A_star, Xy + A0.dot(b0))
C_star = la.inv(la.inv(XX) + la.inv(A0))
nu_star = y.size + nu0
lam_star = np.square(y - X.dot(b_ols)).sum() \
+ (b0 - b_ols).T.dot(C_star).dot(b0 - b_ols) + lam0
h_star = np.sqrt(lam_star / nu_star * np.diag(la.inv(A_star)))
sd_b = st.t.std(nu_star, loc=b_star, scale=h_star)
ci_b = np.vstack(st.t.interval(prob, nu_star, loc=b_star, scale=h_star))
hpdi_b = ci_b
stats_b = np.vstack((b_star, b_star, b_star, sd_b, ci_b, hpdi_b)).T
mean_sigma2 = st.invgamma.mean(0.5*nu_star, scale=0.5*lam_star)
median_sigma2 = st.invgamma.median(0.5*nu_star, scale=0.5*lam_star)
mode_sigma2 = lam_star / (nu_star + 2.0)
sd_sigma2 = st.invgamma.std(0.5*nu_star, scale=0.5*lam_star)
ci_sigma2 = st.invgamma.interval(prob, 0.5*nu_star, scale=0.5*lam_star)
hpdi_sigma2 = invgamma_hpdi(ci_sigma2, 0.5*nu_star, 0.5*lam_star, prob)
stats_sigma2 = np.hstack((mean_sigma2, median_sigma2, mode_sigma2,
sd_sigma2, ci_sigma2, hpdi_sigma2))
stats = np.vstack((stats_b, stats_sigma2))
    stats_string = ['Mean', 'Median', 'Mode', 'Std. dev.', 'Credible interval (lower)',
                    'Credible interval (upper)', 'HPD interval (lower)', 'HPD interval (upper)']
    param_string = ['Intercept $\\alpha$', 'Slope $\\beta$', 'Variance $\\sigma^2$']
results = | pd.DataFrame(stats, index=param_string, columns=stats_string) | pandas.DataFrame |
# Importing packages
import os
import re
from pathlib import Path
import pandas as pd
import numpy as np
# Basic python scripting using object-oriented coding
'''
Using the corpus called 100-english-novels, write a Python programme which does the following:
- The script should take a directory of text files, a keyword, and a window size (number of words) as input parameters, and an output file called out/{filename}.csv
These parameters can be defined in the script itself
- Find out how often each word collocates with the target across the corpus
- Use this to calculate mutual information between the target word and all collocates across the corpus
- Save result as a single file consisting of three columns: collocate, raw_frequency, MI
'''
'''
Shortcomings of this script:
- The current function concatenates all text files before calculating relevant scores. Therefore, the 'window' slides across the boundaries of each text which is not optimal.
- What if a collocate appears multiple times in a given window?
'''
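# The MI computed below follows the usual collocation definition:
#   MI = log2(O11 / E11) with E11 = (R1 * C1) / N,
# where O11 is the joint keyword/collocate frequency inside the window, C1 the collocate's raw
# corpus frequency, N the corpus size in tokens, and R1 the keyword-window term returned by
# get_list_of_collocates.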
# Defining main function
def main():
Collocation(keyword = "cat", window_size = 2) # Change argument input to generate new files with different keywords or window sizes
# Setting class 'CountFunctions'
class Collocation:
def __init__(self, keyword, window_size):
self.keyword = keyword # Defining keyword as the parsed argument 'keyword'
self.window_size = window_size # Defining keyword as the parsed argument 'window_size'
data_dir = self.setting_data_directory() # Setting data directory
out_dir = self.setting_output_directory() # Setting output directory for the generated csv file
files = self.get_paths_from_data_directory(data_dir) # Getting list of filepaths for the images
        tokenized_text = self.get_tokenized_concatenated_texts(files) # Getting tokenized version of the concatenated text corpus
collocates, R1 = self.get_list_of_collocates(tokenized_text, self.keyword , self.window_size) # Getting list of unique collocates
raw_frequencies = [] # Empty list for raw frequencies of collocates
MIs = [] # Empty list for MI-scores between keyword and collocate for all collocates
# Loop through collocate
for collocate in collocates:
collocate_raw_frequency = self.get_raw_frequency(tokenized_text, collocate) # Raw frequency of collocate
O11 = self.get_O11(tokenized_text, self.keyword , collocate, self.window_size) # Joint frequency of keyword and collocate
C1 = collocate_raw_frequency # Calculating: Same as raw frequency of collocate
N = len(tokenized_text) # O11 + O12 + O21 + O22
E11 = (R1 * C1 / N) # Expected frequency
MI = np.log2(O11 / E11) # Mutual information
# Adding information for given collocate to list
raw_frequencies.append(collocate_raw_frequency)
MIs.append(MI)
# Gathering needed lists into a dictionary
data_dict = {"collocate": collocates,
"raw_frequency": raw_frequencies,
"MI": MIs}
# Creating pd data frame from dictionary
df = | pd.DataFrame(data=data_dict) | pandas.DataFrame |
import sys
import click
import requests, requests_cache
import configparser
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from datetime import datetime
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pynance.auth import signed_params
from pynance.utils import create_session, create_datetime, to_milliseconds
from utils import WIDTH, GOLDEN_RATIO, pt_to_in
def create_trades_frame(trades_list):
trades = | pd.DataFrame(trades_list) | pandas.DataFrame |
#!/usr/bin/python -u
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import argparse
SEED = 123
random.seed(SEED)
np.random.seed(SEED)
# -
def train_test_set(df,train_ids,test_ids):
train_df = df.iloc[train_ids,:]
test_df = df.iloc[test_ids,:]
return(train_df,test_df)
def refine_protein_sequences(df,cutoff):
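    # Cap every protein sequence at `cutoff` characters; shorter sequences are left unchanged.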
all_protein_sequences = df["Sequence"].unique()
all_protein_sequences = sorted(all_protein_sequences,key=len)
revised_protein_sequences={}
for x in all_protein_sequences:
if (len(x)<=cutoff):
revised_protein_sequences[x]=x
else:
            revised_protein_sequences[x] = x[:cutoff]
df["Sequence"].replace(revised_protein_sequences,inplace=True)
return df
# +
def get_train_test_samples(input1,input2,output1,output2):
#Parse the big data with latent space
big1_df = pd.read_csv("../data/Compound_Virus_Interactions/"+input1,header='infer')
big2_df = pd.read_csv("../data/Compound_Virus_Interactions/"+input2,header='infer')
#Write interactions with protein id, protein sequence, drug inchi key, drug smiles, pchembl value
interaction1_df = big1_df.iloc[:,[0,4,5,6,8]].copy()
interaction2_df = big2_df.iloc[:,[0,4,5,6,8]].copy()
print(interaction1_df.columns)
print(interaction2_df.columns)
interaction1_df = refine_protein_sequences(interaction1_df,2000)
interaction2_df = refine_protein_sequences(interaction2_df,2000)
#Write the interaction data frame with the revisions
#interaction1_df.to_csv("../data/Drug_Protein_Networks/Filtered_Drug_Viral_interactions_for_Supervised_Learning.csv",index=False)
#interaction2_df.to_csv("../data/Drug_Protein_Networks/Thomas_Filtered_Drug_Viral_interactions_for_Supervised_Learning.csv",index=False)
interaction_df = pd.concat([interaction1_df,interaction2_df],ignore_index=True)
interaction_df.drop_duplicates(subset=['uniprot_accession','standard_inchi_key'],inplace=True)
interaction_df.reset_index(inplace=True, drop=True)
interaction_df
print(interaction1_df.shape)
print(interaction2_df.shape)
# -
#Unique no of viral organisms in the dataset
print(np.size(np.union1d(big1_df['organism'].unique(),big2_df['organism'].unique())))
plt.hist(interaction_df["pchembl_value"])
# +
#Create the train test split to be used by all downstream ML methods
y = interaction_df["pchembl_value"].values
indices = np.arange(interaction_df.shape[0])
_,_,_,_, indices_train, indices_test = train_test_split(interaction_df, y, indices, test_size=0.1, random_state=42)
indices_train,indices_test = list(indices_train),list(indices_test)
indices_train_set = set(indices_train)
indices_test_set = set(indices_test)
indices_list = []
for i in range(len(indices_train)):
indices_list.append(['train',indices_train[i]])
for i in range(len(indices_test)):
indices_list.append(['test',indices_test[i]])
indices_df = | pd.DataFrame(indices_list, columns=['split','ids']) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Process Data
# ## Load Libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
# ## Load Data
def load_data(messages_filepath, categories_filepath):
"""
Input:
1. messages_filepath: path of messages datasets
2. categories_filepath: path of categories datasets
Output:
1. df: merged dataframe, which contains data from messages, categories files
Process:
1. Load the required datasets, messages, categories
2. Merge the two datasets
"""
# Load messages dataset
messages = pd.read_csv(messages_filepath)
# Load categories dataset
categories = pd.read_csv(categories_filepath)
# Merge datasets
df = | pd.merge(messages, categories, on='id', how='inner') | pandas.merge |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.cluster import DBSCAN
from lifelines.statistics import logrank_test
from lifelines import KaplanMeierFitter
from sklearn import metrics
from sklearn.metrics import pairwise_distances
from lifelines.statistics import pairwise_logrank_test
from lifelines.statistics import multivariate_logrank_test
from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
def EuclideanDistances(A, B):
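    # Pairwise Euclidean distances computed via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b,
    # using matrix products instead of an explicit loop over row pairs.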
BT = B.transpose()
vecProd = np.dot(A,BT)
SqA = A**2
sumSqA = np.matrix(np.sum(SqA, axis=1))
sumSqAEx = np.tile(sumSqA.transpose(), (1, vecProd.shape[1]))
SqB = B**2
sumSqB = np.sum(SqB, axis=1)
sumSqBEx = np.tile(sumSqB, (vecProd.shape[0], 1))
SqED = sumSqBEx + sumSqAEx - 2*vecProd
SqED[SqED<0]=0.0
ED = np.sqrt(SqED)
return ED
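# Merge the learned latent representation (one row per TCGA CESC sample) with the clinical table,
# matching on the 12-character patient barcode, and keep only the survival-related columns.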
dataset4 = pd.read_csv("processing/out_data/12345_3_CESC.txt",header=None,sep=',')
df = pd.read_csv("dataset/TCGA_Clinical/CESC/nationwidechildrens.org_clinical_patient_cesc.txt",sep='\t')
df=df[["bcr_patient_barcode","gender","vital_status","last_contact_days_to","death_days_to"]]
a=[]
for i in dataset4[0]:
str=i
a.append(str[0:12])
a1=pd.DataFrame(a)
dataset4[1282]=a1
t=df.merge(dataset4,left_on=df["bcr_patient_barcode"],right_on=dataset4[1282],how='left')
class_mapping = {'Dead':0, 'Alive':1}
t["vital_status"] = t["vital_status"].map(class_mapping)
del t["key_0"]
del t[1282]
t=t.dropna(axis=0,how='any')
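# Scan DBSCAN (eps, min_samples) combinations and print those that produce exactly two distinct
# labels (the noise label -1 counts as one of them), before fixing eps=75, min_samples=10 below.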
for i in range(8,80):
for j in range(2,10):
X=t.iloc[:,6:1286]
db = DBSCAN(eps=i, min_samples=j).fit(X)
labels = db.labels_
if len(set(list(labels)))==2:
result = Counter(labels)
print(i,j,len(set(list(labels))),result)
X=t.iloc[:,6:1286]
db = DBSCAN(eps=75, min_samples=10).fit(X)
labels = db.labels_
t['cluster_db'] = labels
t=t.sort_values('cluster_db')
data_zs=t.iloc[:,6:1286]
tsne=TSNE()
tsne.fit_transform(data_zs)
#a=tsne.fit_transform(data_zs)
tsne=pd.DataFrame(tsne.embedding_,index=data_zs.index)
d = cosine_similarity(tsne.values, tsne.values)  # `t1` was undefined here; assuming the t-SNE embedding was intended
Euclidean_dis=EuclideanDistances(tsne.values,tsne.values)
d1= | pd.DataFrame(Euclidean_dis) | pandas.DataFrame |
"""
TODO:
copy in data dir
targzip
post
DONE:
clean date_time string
rename types:
type -> type_string
processed_type -> item_type
merge the data
save file
only 1 section (check content is correct): checked
"""
import datetime
import os
import re
import requests
import urllib.parse
import time
from bs4 import BeautifulSoup
import html2text
import numpy as np
import pandas
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
# params
search_key_word = 'climate <PASSWORD>'
search_key = 'search'
url = r'https://www.cato.org/search'
link_list_data_file_path = 'url-data-cato-institute.csv'
data_dir = 'cato-institute'
html_dir = os.path.join(data_dir, 'html')
text_dir = os.path.join(data_dir, 'text')
articles_data_file = 'pages.csv'
quick_save_period = 10
do_print = True
testing = False
test_end = 20
# lib
def clean_text(text):
return re.sub(' $', '', re.sub('^ ', '', re.sub(' +', ' ', text.replace('\n', ' '))))
def read_page_entries(driver, time_stamp, page_index, page_lists, do_print = True):
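    # Parse every Algolia search hit on the current results page and append its timestamp,
    # page index, position, date, content type, title and link to the shared page_lists dict.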
entries_container = driver.find_element_by_class_name('algolia-search-results').find_element_by_class_name('ais-Hits-list')
entries = entries_container.find_elements_by_class_name('ais-Hits-item')
if(do_print):
print(f'Page {page_index} has {len(entries)} entries')
for entry_index, entry in enumerate(entries):
meta_soup = entry.find_element_by_class_name('article-embed__meta')
date_soup = meta_soup.find_elements_by_class_name('meta')[0]
date = date_soup.text
entry_type = meta_soup.find_element_by_class_name('article-embed__topic').text
entry_body_soup = entry.find_element_by_class_name('article-embed__inner')
entry_title_and_link_soup = entry_body_soup.find_element_by_tag_name('h3').find_element_by_tag_name('a')
link = entry_title_and_link_soup.get_attribute('href')
title = entry_title_and_link_soup.text
page_lists['time_stamp'].append(time_stamp)
page_lists['page'].append(page_index)
page_lists['index_in_page'].append(entry_index)
page_lists['date'].append(date)
page_lists['type'].append(entry_type)
page_lists['title'].append(title)
page_lists['link'].append(link)
def quick_save(page_lists, data_file_path):
print(f'saving...')
data = | pandas.DataFrame(page_lists) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
from scipy.stats import nct
from copy import deepcopy
import matplotlib.pyplot as plt
from ..estimators.stan_estimator import StanEstimatorMAP
from ..exceptions import IllegalArgument, ModelException
from ..utils.kernels import sandwich_kernel
from ..utils.features import make_fourier_series_df
from .template import BaseTemplate, MAPTemplate
from ..constants.constants import PredictionKeys, PredictMethod
from ..constants import ktrlite as constants
from orbit.constants.palette import OrbitPalette
from ..initializer.ktrlite import KTRLiteInitializer
class BaseKTRLite(BaseTemplate):
"""Base KTRLite model object with shared functionality for MAP method
Parameters
----------
seasonality : int, or list of int
multiple seasonality
seasonality_fs_order : int, or list of int
fourier series order for seasonality
level_knot_scale : float
sigma for level; default to be .5
seasonal_initial_knot_scale : float
scale parameter for seasonal regressors initial coefficient knots; default to be 1
seasonal_knot_scale : float
scale parameter for seasonal regressors drift of coefficient knots; default to be 0.1.
span_level : float between (0, 1)
window width to decide the number of windows for the level (trend) term.
e.g., span 0.1 will produce 10 windows.
span_coefficients : float between (0, 1)
window width to decide the number of windows for the regression term
degree of freedom : int
degree of freedom for error t-distribution
level_knot_dates : array like
list of pre-specified dates for the level knots
level_knot_length : int
the distance between every two knots for level
coefficients_knot_length : int
the distance between every two knots for coefficients
knot_location : {'mid_point', 'end_point'}; default 'mid_point'
knot locations. When level_knot_dates is specified, this is ignored for level knots.
date_freq : str
date frequency; if not supplied, pd.infer_freq will be used to imply the date frequency.
kwargs
To specify `estimator_type` or additional args for the specified `estimator_type`
"""
# data labels for sampler
_data_input_mapper = constants.DataInputMapper
# used to match name of `*.stan` or `*.pyro` file to look for the model
_model_name = 'ktrlite'
def __init__(self,
seasonality=None,
seasonality_fs_order=None,
level_knot_scale=0.5,
seasonal_initial_knot_scale=1.0,
seasonal_knot_scale=0.1,
span_level=0.1,
span_coefficients=0.3,
degree_of_freedom=30,
# knot customization
level_knot_dates=None,
level_knot_length=None,
coefficients_knot_length=None,
knot_location='mid_point',
date_freq=None,
**kwargs):
super().__init__(**kwargs) # create estimator in base class
self.span_level = span_level
self.level_knot_scale = level_knot_scale
# customize knot dates for levels
self.level_knot_dates = level_knot_dates
self.level_knot_length = level_knot_length
self.coefficients_knot_length = coefficients_knot_length
self.knot_location = knot_location
self.seasonality = seasonality
self.seasonality_fs_order = seasonality_fs_order
self.seasonal_initial_knot_scale = seasonal_initial_knot_scale
self.seasonal_knot_scale = seasonal_knot_scale
# set private var to arg value
# if None set default in _set_default_args()
# use public one if knots length is not available
self._seasonality = self.seasonality
self._seasonality_fs_order = self.seasonality_fs_order
self._seasonal_knot_scale = self.seasonal_knot_scale
self._seasonal_initial_knot_scale = None
self._seasonal_knot_scale = None
self._level_knot_dates = self.level_knot_dates
self._degree_of_freedom = degree_of_freedom
self.span_coefficients = span_coefficients
# self.rho_coefficients = rho_coefficients
self.date_freq = date_freq
# regression attributes -- now is ONLY used for fourier series as seasonality
self.num_of_regressors = 0
self.regressor_col = list()
self.regressor_col_gp = list()
self.coefficients_initial_knot_scale = list()
self.coefficients_knot_scale = list()
# set static data attributes
# self._set_static_attributes()
# set model param names
# this only depends on static attributes, but should these params depend
# on dynamic data (e.g actual data matrix, number of responses, etc) this should be
# called after fit instead
# self._set_model_param_names()
# basic response fields
# mainly set by ._set_dynamic_attributes()
self.response_offset = 0
self.is_valid_response = None
self.which_valid_response = None
self.num_of_valid_response = 0
self.num_knots_level = None
self.knots_tp_level = None
self.num_knots_coefficients = None
self.knots_tp_coefficients = None
self.regressor_matrix = None
# self.coefficients_knot_dates = None
def _set_init_values(self):
"""Override function from Base Template"""
# init_values_partial = partial(init_values_callable, seasonality=seasonality)
# partialfunc does not work when passed to PyStan because PyStan uses
# inspect.getargspec(func) which seems to raise an exception with keyword-only args
# caused by using partialfunc
# lambda as an alternative workaround
if len(self._seasonality) > 1 and self.num_of_regressors > 0:
init_values_callable = KTRLiteInitializer(self.num_of_regressors, self.num_knots_coefficients)
self._init_values = init_values_callable
# initialization related modules
def _set_default_args(self):
"""Set default attributes for None
"""
if self.seasonality is None:
self._seasonality = list()
self._seasonality_fs_order = list()
elif not isinstance(self._seasonality, list) and isinstance(self._seasonality * 1.0, float):
self._seasonality = [self.seasonality]
if self._seasonality and self._seasonality_fs_order is None:
self._seasonality_fs_order = [2] * len(self._seasonality)
elif not isinstance(self._seasonality_fs_order, list) and isinstance(self._seasonality_fs_order * 1.0, float):
self._seasonality_fs_order = [self.seasonality_fs_order]
if len(self._seasonality_fs_order) != len(self._seasonality):
raise IllegalArgument('length of seasonality and fs_order not matching')
for k, order in enumerate(self._seasonality_fs_order):
if 2 * order > self._seasonality[k] - 1:
raise IllegalArgument('reduce seasonality_fs_order to avoid over-fitting')
if not isinstance(self.seasonal_initial_knot_scale, list) and \
isinstance(self.seasonal_initial_knot_scale * 1.0, float):
self._seasonal_initial_knot_scale = [self.seasonal_initial_knot_scale] * len(self._seasonality)
else:
self._seasonal_initial_knot_scale = self.seasonal_initial_knot_scale
if not isinstance(self.seasonal_knot_scale, list) and isinstance(self.seasonal_knot_scale * 1.0, float):
self._seasonal_knot_scale = [self.seasonal_knot_scale] * len(self._seasonality)
else:
self._seasonal_knot_scale = self.seasonal_knot_scale
def _set_seasonality_attributes(self):
"""given list of seasonalities and their order, create list of seasonal_regressors_columns"""
self.regressor_col_gp = list()
self.regressor_col = list()
self.coefficients_initial_knot_scale = list()
self.coefficients_knot_scale = list()
if len(self._seasonality) > 0:
for idx, s in enumerate(self._seasonality):
fs_cols = []
order = self._seasonality_fs_order[idx]
self.coefficients_initial_knot_scale += [self._seasonal_initial_knot_scale[idx]] * order * 2
self.coefficients_knot_scale += [self._seasonal_knot_scale[idx]] * order * 2
for i in range(1, order + 1):
fs_cols.append('seas{}_fs_cos{}'.format(s, i))
fs_cols.append('seas{}_fs_sin{}'.format(s, i))
# flatten version of regressor columns
self.regressor_col += fs_cols
# list of group of regressor columns bundled with seasonality
self.regressor_col_gp.append(fs_cols)
self.num_of_regressors = len(self.regressor_col)
def _set_static_attributes(self):
"""Over-ride function from Base Template"""
self._set_default_args()
self._set_seasonality_attributes()
# fit and predict related modules
def _set_validate_ktr_params(self, df):
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
if self.num_of_observations < max_seasonality:
raise ModelException(
"Number of observations {} is less than max seasonality {}".format(
self.num_of_observations, max_seasonality))
# get some reasonable offset to regularize response to make default priors scale-insensitive
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
self.response_offset = np.nanmean(self.response[:max_seasonality])
else:
self.response_offset = np.nanmean(self.response)
self.is_valid_response = ~np.isnan(self.response)
# [0] to convert tuple back to array
self.which_valid_response = np.where(self.is_valid_response)[0]
self.num_of_valid_response = len(self.which_valid_response)
def _make_seasonal_regressors(self, df, shift):
"""
Parameters
----------
df : pd.DataFrame
shift: int
use 0 for fitting; use delta of prediction start and train start for prediction
Returns
-------
pd.DataFrame
data with computed fourier series attached
"""
if len(self._seasonality) > 0:
for idx, s in enumerate(self._seasonality):
order = self._seasonality_fs_order[idx]
df, _ = make_fourier_series_df(df, s, order=order, prefix='seas{}_'.format(s), shift=shift)
return df
def _set_regressor_matrix(self, df):
# init of regression matrix depends on length of response vector
self.regressor_matrix = np.zeros((self.num_of_observations, 0), dtype=np.double)
if self.num_of_regressors > 0:
self.regressor_matrix = df.filter(items=self.regressor_col, ).values
@staticmethod
def get_gap_between_dates(start_date, end_date, freq):
diff = end_date - start_date
gap = np.array(diff / np.timedelta64(1, freq))
return gap
@staticmethod
def _set_knots_tp(knots_distance, cutoff, knot_location):
if knot_location == 'mid_point':
# knot in the middle
knots_idx_start = round(knots_distance / 2)
knots_idx = np.arange(knots_idx_start, cutoff, knots_distance)
elif knot_location == 'end_point':
# knot in the end
knots_idx = np.sort(np.arange(cutoff - 1, 0, -knots_distance))
return knots_idx
def _set_kernel_matrix(self, df):
# Note that our tp starts by 1; to convert back to index of array, reduce it by 1
tp = np.arange(1, self.num_of_observations + 1) / self.num_of_observations
# this approach put knots in full range
self._cutoff = self.num_of_observations
# kernel of level calculations
if self._level_knot_dates is None:
if self.level_knot_length is not None:
knots_distance = self.level_knot_length
else:
number_of_knots = round(1 / self.span_level)
# FIXME: is it the best way to calculate knots_distance?
knots_distance = math.ceil(self._cutoff / number_of_knots)
knots_idx_level = self._set_knots_tp(knots_distance, self._cutoff, self.knot_location)
self._knots_idx_level = knots_idx_level
self.knots_tp_level = (1 + knots_idx_level) / self.num_of_observations
self._level_knot_dates = df[self.date_col].values[knots_idx_level]
else:
# to exclude dates which are not within training period
self._level_knot_dates = pd.to_datetime([
x for x in self._level_knot_dates if
(x <= df[self.date_col].values[-1]) and (x >= df[self.date_col].values[0])
])
# since we allow _level_knot_dates to be continuous, we calculate distance between knots
# in continuous value as well (instead of index)
if self.date_freq is None:
self.date_freq = pd.infer_freq(df[self.date_col])[0]
start_date = self.training_start
self.knots_tp_level = np.array(
(self.get_gap_between_dates(start_date, self._level_knot_dates, self.date_freq) + 1) /
(self.get_gap_between_dates(start_date, self.training_end, self.date_freq) + 1)
)
self.kernel_level = sandwich_kernel(tp, self.knots_tp_level)
self.num_knots_level = len(self.knots_tp_level)
self.kernel_coefficients = np.zeros((self.num_of_observations, 0), dtype=np.double)
self.num_knots_coefficients = 0
# kernel of coefficients calculations
if self.num_of_regressors > 0:
if self.coefficients_knot_length is not None:
knots_distance = self.coefficients_knot_length
else:
number_of_knots = round(1 / self.span_coefficients)
knots_distance = math.ceil(self._cutoff / number_of_knots)
knots_idx_coef = self._set_knots_tp(knots_distance, self._cutoff, self.knot_location)
self._knots_idx_coef = knots_idx_coef
self.knots_tp_coefficients = (1 + knots_idx_coef) / self.num_of_observations
self._coef_knot_dates = df[self.date_col].values[knots_idx_coef]
self.kernel_coefficients = sandwich_kernel(tp, self.knots_tp_coefficients)
self.num_knots_coefficients = len(self.knots_tp_coefficients)
def _set_dynamic_attributes(self, df):
"""Overriding: func: `~orbit.models.BaseETS._set_dynamic_attributes"""
# extra settings and validation for KTRLite
self._set_validate_ktr_params(df)
# attach fourier series as regressors
df = self._make_seasonal_regressors(df, shift=0)
# set regressors as input matrix and derive kernels
self._set_regressor_matrix(df)
self._set_kernel_matrix(df)
def _set_model_param_names(self):
"""Model parameters to extract"""
self._model_param_names += [param.value for param in constants.BaseSamplingParameters]
if len(self._seasonality) > 0 or self.num_of_regressors > 0:
self._model_param_names += [param.value for param in constants.RegressionSamplingParameters]
def _predict(self, posterior_estimates, df, include_error=False, **kwargs):
"""Vectorized version of prediction math"""
################################################################
# Prediction Attributes
################################################################
start = self.prediction_input_meta['start']
trained_len = self.num_of_observations
output_len = self.prediction_input_meta['df_length']
################################################################
# Model Attributes
################################################################
model = deepcopy(posterior_estimates)
# TODO: adopt torch ?
# for k, v in model.items():
# model[k] = torch.from_numpy(v)
# We can pull an arbitrary value from the dictionary because of the safe
# assumption that the length of the first dimension is always the number of
# samples, so it can be used to determine `num_sample`. If predict_method is
# anything other than full, the value here should be 1.
arbitrary_posterior_value = list(model.values())[0]
num_sample = arbitrary_posterior_value.shape[0]
################################################################
# Trend Component
################################################################
new_tp = np.arange(start + 1, start + output_len + 1) / trained_len
if include_error:
# in-sample knots
lev_knot_in = model.get(constants.BaseSamplingParameters.LEVEL_KNOT.value)
# TODO: hacky; assume the distance between the last two knots holds for all knots
lev_knot_width = self.knots_tp_level[-1] - self.knots_tp_level[-2]
# check whether we need to put new knots for simulation
if new_tp[-1] > 1:
# derive knots tp
if self.knots_tp_level[-1] + lev_knot_width >= new_tp[-1]:
knots_tp_level_out = np.array([new_tp[-1]])
else:
knots_tp_level_out = np.arange(self.knots_tp_level[-1] + lev_knot_width, new_tp[-1], lev_knot_width)
new_knots_tp_level = np.concatenate([self.knots_tp_level, knots_tp_level_out])
lev_knot_out = np.random.laplace(0, self.level_knot_scale,
size=(lev_knot_in.shape[0], len(knots_tp_level_out)))
lev_knot_out = np.cumsum(np.concatenate([lev_knot_in[:, -1].reshape(-1, 1), lev_knot_out],
axis=1), axis=1)[:, 1:]
lev_knot = np.concatenate([lev_knot_in, lev_knot_out], axis=1)
else:
new_knots_tp_level = self.knots_tp_level
lev_knot = lev_knot_in
kernel_level = sandwich_kernel(new_tp, new_knots_tp_level)
else:
lev_knot = model.get(constants.BaseSamplingParameters.LEVEL_KNOT.value)
kernel_level = sandwich_kernel(new_tp, self.knots_tp_level)
obs_scale = model.get(constants.BaseSamplingParameters.OBS_SCALE.value)
obs_scale = obs_scale.reshape(-1, 1)
trend = np.matmul(lev_knot, kernel_level.transpose(1, 0))
################################################################
# Seasonality Component
################################################################
# initialize the total seasonal regression as zeros with the same shape as the trend
total_seas_regression = np.zeros(trend.shape, dtype=np.double)
seas_decomp = {}
# update seasonal regression matrices
if self._seasonality and self.regressor_col:
df = self._make_seasonal_regressors(df, shift=start)
coef_knot = model.get(constants.RegressionSamplingParameters.COEFFICIENTS_KNOT.value)
kernel_coefficients = sandwich_kernel(new_tp, self.knots_tp_coefficients)
coef = np.matmul(coef_knot, kernel_coefficients.transpose(1, 0))
pos = 0
for idx, cols in enumerate(self.regressor_col_gp):
seasonal_regressor_matrix = df[cols].values
seas_coef = coef[..., pos:(pos + len(cols)), :]
seas_regression = np.sum(seas_coef * seasonal_regressor_matrix.transpose(1, 0), axis=-2)
seas_decomp['seasonality_{}'.format(self._seasonality[idx])] = seas_regression
pos += len(cols)
total_seas_regression += seas_regression
if include_error:
epsilon = nct.rvs(self._degree_of_freedom, nc=0, loc=0,
scale=obs_scale, size=(num_sample, len(new_tp)))
pred_array = trend + total_seas_regression + epsilon
else:
pred_array = trend + total_seas_regression
out = {
PredictionKeys.PREDICTION.value: pred_array,
PredictionKeys.TREND.value: trend,
}
out.update(seas_decomp)
return out
class KTRLiteMAP(MAPTemplate, BaseKTRLite):
"""Concrete KTRLite model for MAP (Maximum a Posteriori) prediction
This model only supports MAP estimator type
"""
_supported_estimator_types = [StanEstimatorMAP]
def __init__(self, estimator_type=StanEstimatorMAP, **kwargs):
super().__init__(estimator_type=estimator_type, **kwargs)
# FIXME: need a unit test of this function
def get_level_knots(self):
out = {
self.date_col:
self._level_knot_dates,
constants.BaseSamplingParameters.LEVEL_KNOT.value:
# TODO: this is hacky, investigate why we have an extra dimension here?
np.squeeze(self._aggregate_posteriors[PredictMethod.MAP.value][
constants.BaseSamplingParameters.LEVEL_KNOT.value], 0),
}
return | pd.DataFrame(out) | pandas.DataFrame |
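The level trend in `_predict` above is just a kernel-weighted average of knot values over normalized time points. A minimal numpy sketch of that idea — using a Gaussian kernel purely for illustration, since orbit's actual `sandwich_kernel` weighting may differ, and with hypothetical knot locations and values:

import numpy as np

def kernel_weights(tp, knots_tp, bandwidth=0.1):
    # (num_obs, num_knots) Gaussian weights, each row normalized to sum to 1
    dist = tp[:, None] - knots_tp[None, :]
    w = np.exp(-0.5 * (dist / bandwidth) ** 2)
    return w / w.sum(axis=1, keepdims=True)

tp = np.arange(1, 101) / 100.0                   # normalized time, as in _set_kernel_matrix
knots_tp = np.array([0.25, 0.50, 0.75, 1.00])    # hypothetical knot locations
lev_knot = np.array([10.0, 12.0, 11.0, 13.0])    # hypothetical knot values
trend = kernel_weights(tp, knots_tp) @ lev_knot  # smooth level curve, shape (100,)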
import numpy as np
import pandas as pd
import pytest
import reciprocalspaceship as rs
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return lambda x, y: | pd.isna(x) | pandas.isna |
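These fixtures feed pandas' extension-array test machinery; the comparator above leans entirely on `pd.isna`, which treats `pd.NA`, `np.nan`, and `None` uniformly:

import numpy as np
import pandas as pd

print(pd.isna(pd.NA), pd.isna(np.nan), pd.isna(None))    # True True True
print(pd.isna(pd.Series([1.0, None, pd.NA])).tolist())   # [False, True, True]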
from datetime import time
import numpy as np
import pytest
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestBetweenTime:
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
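`between_time` filters on the time-of-day component of a `DatetimeIndex`, which is what the assertions above exercise; a small standalone example with made-up sample data (endpoints are inclusive by default):

import numpy as np
import pandas as pd

rng = pd.date_range("2000-01-01", periods=288, freq="5min")   # one full day
df = pd.DataFrame({"x": np.arange(len(rng))}, index=rng)
subset = df.between_time("00:00", "01:00")   # rows whose clock time is in [00:00, 01:00]
print(len(subset))                           # 13 -> 00:00, 00:05, ..., 01:00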
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import mean_absolute_error
import operator
import random
class RecommenderSystem(object):
def __init__(self, data, metric, algorithm, user):
self.data = data
self.distance, self.neighbors = self.__find_neighbors(metric, algorithm)
self.recommended_tuple = self.__recommend(user)
@staticmethod
def __get_neighbors(indices, distance, df):
all_neighbors = []
for x in range(len(indices)):
neighbors = []
for i in range(len(indices[x])):
neighbors.append((df.iloc[indices[x, i], 0], distance[x, i]))
all_neighbors.append(neighbors)
return all_neighbors
def __find_neighbors(self, metric, algorithm, n_neighbors=5):
knn = NearestNeighbors(metric=metric, p=2, algorithm=algorithm)
knn.fit(self.data.iloc[:, :5].values)
distance, indices = knn.kneighbors(self.data.iloc[:, :5].values, n_neighbors=n_neighbors)
return distance, self.__get_neighbors(indices, distance, self.data.iloc[:, :5])
def __recommend(self, user):
user_games = self.data[self.data['user-id'] == user]
dissim_games = []
for neighbor in self.neighbors[self.data.index[self.data["user-id"] == user].tolist()[0]]:
temp = self.data[(self.data['user-id'] == neighbor[0]) & (~self.data['game-title'].isin(user_games['game-title']))]
for index, game in temp.iterrows():
dissim_games.append((game['game-title'], game['rating']))
dissim_games.sort(key=operator.itemgetter(0))
flag = ""
rec_list, running_sum, count = [], 0, 0
for dis in dissim_games:
if flag != dis[0]:
if flag != "":
rec_list.append((flag, running_sum / count))
flag = dis[0]
running_sum = dis[1]
count = 1
else:
running_sum += dis[1]
count += 1
# flush the last accumulated title, which the loop above never appends
if flag != "":
rec_list.append((flag, running_sum / count))
sort_list = sorted(rec_list, key=operator.itemgetter(1), reverse=True)
return sort_list
def __rec_games(self):
games = []
for pair in self.recommended_tuple:
if pair[1] > 3.8:
games.append(pair[0])
return games
def execute_system(self):
recommendations = self.__rec_games()
return recommendations
def evaluate(self, user):
errors_lst = []
user_r = self.data[self.data["user-id"] == user]
for i in self.neighbors[self.data.index[self.data["user-id"] == user].tolist()[0]]:
neighbor = self.data[self.data["user-id"] == i[0]]
ma = mean_absolute_error(pd.merge(user_r, neighbor, how="inner", on="game-title")["rating_x"],
| pd.merge(user_r, neighbor, how="inner", on="game-title") | pandas.merge |
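The evaluation above hinges on aligning a user's ratings with a neighbor's by game title before scoring; a self-contained sketch of that merge-then-MAE pattern, using the same column names and made-up ratings:

import pandas as pd
from sklearn.metrics import mean_absolute_error

user_r = pd.DataFrame({"game-title": ["a", "b", "c"], "rating": [5, 3, 4]})
neighbor = pd.DataFrame({"game-title": ["b", "c", "d"], "rating": [4, 4, 2]})
both = pd.merge(user_r, neighbor, how="inner", on="game-title")   # adds rating_x / rating_y
print(mean_absolute_error(both["rating_x"], both["rating_y"]))    # 0.5 = mean(|3-4|, |4-4|)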
# -*- coding: utf-8 -*-
"""
Tools producing reports of fairness, bias, or model performance measures
Contributors:
camagallen <<EMAIL>>
"""
import aif360.sklearn.metrics as aif
from functools import reduce
from IPython.display import HTML
import logging
import numpy as np
import pandas as pd
from sklearn.metrics import (mean_absolute_error, mean_squared_error,
precision_score, balanced_accuracy_score,
classification_report)
from scipy import stats
from warnings import catch_warnings, simplefilter, warn, filterwarnings
# Tutorial Libraries
from . import __performance_metrics as pmtrc, __fairness_metrics as fcmtrc
from .__fairness_metrics import eq_odds_diff, eq_odds_ratio
from .__preprocessing import (standard_preprocess, stratified_preprocess,
report_labels, y_cols)
from .__validation import ValidationError
from .utils import format_errwarn, iterate_cohorts, limit_alert
# ToDo: find better solution for these warnings
filterwarnings('ignore', module='pandas')
filterwarnings('ignore', module='sklearn')
''' Mini Reports '''
def classification_performance(y_true, y_pred, target_labels=None,
sig_fig:int=4):
""" Returns a pandas dataframe of the scikit-learn classification report,
formatted for use in fairMLHealth tools
Args:
y_true (array): Target values. Must be compatible with model.predict().
y_pred (array): Prediction values. Must be compatible with
model.predict().
target_labels (list of str): Optional labels for target values.
"""
if target_labels is None:
target_labels = [f"target = {t}" for t in set(y_true)]
report = classification_report(y_true, y_pred, output_dict=True,
target_names=target_labels)
report = pd.DataFrame(report).transpose()
# Move accuracy to separate row
accuracy = report.loc['accuracy', :]
report.drop('accuracy', inplace=True)
report.loc['accuracy', 'accuracy'] = accuracy[0]
report = report.round(sig_fig)
return report
def regression_performance(y_true, y_pred, sig_fig:int=4):
""" Returns a pandas dataframe of the regression performance metrics,
similar to scikit's classification_performance
Args:
y_true (array): Target values. Must be compatible with model.predict().
y_pred (array): Prediction values. Must be compatible with
model.predict().
"""
report = {}
y = y_cols()['disp_names']['yt']
yh = y_cols()['disp_names']['yh']
report[f'{y} Mean'] = np.mean(y_true.values)
report[f'{yh} Mean'] = np.mean(y_pred.values)
report['MSE'] = mean_squared_error(y_true, y_pred)
report['MAE'] = mean_absolute_error(y_true, y_pred)
report['Rsqrd'] = pmtrc.r_squared(y_true, y_pred)
report = pd.DataFrame().from_dict(report, orient='index'
).rename(columns={0: 'Score'})
report = report.round(sig_fig)
return report
''' Main Reports '''
def flag(df, caption:str="", sig_fig:int=4, as_styler:bool=True):
""" Generates embedded html pandas styler table containing a highlighted
version of a model comparison dataframe
Args:
df (pandas dataframe): Model comparison dataframe
caption (str, optional): Optional caption for table. Defaults to "".
as_styler (bool, optional): If True, returns a pandas Styler of the
highlighted table (to which other styles/highlights can be added).
Otherwise, returns the table as an embedded HTML object. Defaults
to True.
Returns:
Embedded html or pandas.io.formats.style.Styler
"""
return __Flagger().apply_flag(df, caption, sig_fig, as_styler)
def bias_report(X, y_true, y_pred, features:list=None, pred_type="classification",
sig_fig:int=4, flag_oor=True, **kwargs):
""" Generates a table of stratified bias metrics
Args:
X (array-like): Sample features
y_true (array-like, 1-D): Sample targets
y_pred (array-like, 1-D): Sample target predictions
features (list): columns in X to be assessed if not all columns.
Defaults to None (i.e. all columns).
pred_type (str, optional): One of "classification" or "regression".
Defaults to "classification".
flag_oor (bool): if true, will apply flagging function to highlight
fairness metrics which are considered to be outside the "fair" range
(Out Of Range). Defaults to True.
priv_grp (int): Specifies which label indicates the privileged
group. Defaults to 1.
Raises:
ValueError
Returns:
pandas Data Frame
"""
validtypes = ["classification", "regression"]
if pred_type not in validtypes:
raise ValueError(f"Summary report type must be one of {validtypes}")
if pred_type == "classification":
df = __classification_bias_report(X=X, y_true=y_true, y_pred=y_pred,
features=features, **kwargs)
elif pred_type == "regression":
df = __regression_bias_report(X=X, y_true=y_true, y_pred=y_pred,
features=features, **kwargs)
#
if flag_oor:
df = flag(df, sig_fig=sig_fig)
else:
df = df.round(sig_fig)
return df
def data_report(X, Y, features:list=None, targets:list=None, add_overview=True,
sig_fig:int=4):
"""
Generates a table of stratified data metrics
Args:
X (pandas dataframe or compatible object): sample data to be assessed
Y (pandas dataframe or compatible object): sample targets to be
assessed. Note that any observations with missing targets will be
ignored.
features (list): columns in X to be assessed if not all columns.
Defaults to None (i.e. all columns).
targets (list): columns in Y to be assessed if not all columns.
Defaults to None (i.e. all columns).
add_overview (bool): whether to add a summary row with metrics for
"ALL FEATURES" and "ALL VALUES" as a single group. Defaults to True.
Requirements:
Each feature must be discrete to run stratified analysis. If any data
are not discrete and there are more than 11 values, the reporter will
reformat those data into quantiles
Returns:
pandas Data Frame
"""
#
def entropy(x):
# use float type for x to avoid boolean interpretation issues if any
# pd.NA (integer na) values are present
try:
_x = x.astype(float)
except ValueError: # convert strings to numeric categories
_x = pd.Categorical(x).codes
return stats.entropy(np.unique(_x, return_counts=True)[1], base=2)
def __data_dict(x, col):
''' Generates a dictionary of statistics '''
res = {'Obs.': x.shape[0]}
if not x[col].isna().all():
res[col + " Mean"] = x[col].mean()
res[col + " Median"] = x[col].median()
res[col + " Std. Dev."] = x[col].std()
else:
# Force addition of second column to ensure proper formatting
# as pandas series
for c in [col + " Mean", col + " Median", col + " Std. Dev."]:
res[c] = np.nan
return res
#
X_df = stratified_preprocess(X=X, features=features)
Y_df = stratified_preprocess(X=Y, features=targets)
if X_df.shape[0] != Y_df.shape[0]:
raise ValidationError("Number of observations mismatch between X and Y")
#
if features is None:
features = X_df.columns.tolist()
strat_feats = [f for f in features if f in X_df.columns]
limit_alert(strat_feats, item_name="features")
#
if targets is None:
targets = Y_df.columns.tolist()
strat_targs = [t for t in targets if t in Y_df.columns]
limit_alert(strat_targs, item_name="targets", limit=3,
issue="This may make the output difficult to read.")
#
res = []
# "Obs."" included in index for ease of calculation
ix_cols = ['Feature Name', 'Feature Value', 'Obs.']
for t in strat_targs:
X_df[t] = Y_df[t]
feat_subset = [f for f in strat_feats if f != t]
if not any(feat_subset):
continue
res_t = __apply_featureGroups(feat_subset, X_df, __data_dict, t)
# convert id columns to strings to work around bug in pd.concat
for m in ix_cols:
res_t[m] = res_t[m].astype(str)
res.append(res_t.set_index(ix_cols))
results = pd.concat(res, axis=1).reset_index()
#
results['Obs.'] = results['Obs.'].astype(float).astype(int)
results['Value Prevalence'] = results['Obs.']/X_df.shape[0]
n_missing = X_df[strat_feats].replace('nan', np.nan).isna().sum().reset_index()
n_missing.columns = ['Feature Name', 'Missing Values']
entropy = X_df[strat_feats].apply(axis=0, func=entropy).reset_index()
entropy.columns = ['Feature Name', 'Entropy']
results = results.merge(n_missing, how='left', on='Feature Name'
).merge(entropy, how='left', on='Feature Name')
#
if add_overview:
res = []
for i, t in enumerate(strat_targs):
res_t = pd.DataFrame(__data_dict(X_df, t), index=[0])
res.append(res_t.set_index('Obs.'))
overview = pd.concat(res, axis=1).reset_index()
N_feat = len(strat_feats)
N_missing = n_missing['Missing Values'].sum()
N_obs = X_df.shape[0]
overview['Feature Name'] = "ALL FEATURES"
overview['Feature Value'] = "ALL VALUES"
overview['Missing Values'] = N_missing
overview['Value Prevalence'] = (N_obs*N_feat-N_missing)/(N_obs*N_feat)
rprt = pd.concat([overview, results], axis=0, ignore_index=True)
else:
rprt = results
#
rprt = sort_report(rprt)
rprt = rprt.round(sig_fig)
return rprt
def performance_report(X, y_true, y_pred, y_prob=None, features:list=None,
pred_type="classification", sig_fig:int=4,
add_overview=True):
""" Generates a table of stratified performance metrics
Args:
X (pandas dataframe or compatible object): sample data to be assessed
y_true (array-like, 1-D): Sample targets
y_pred (array-like, 1-D): Sample target predictions
y_prob (array-like, 1-D): Sample target probabilities. Defaults to None.
features (list): columns in X to be assessed if not all columns.
Defaults to None (i.e. all columns).
pred_type (str, optional): One of "classification" or "regression".
Defaults to "classification".
add_overview (bool): whether to add a summary row with metrics for
"ALL FEATURES" and "ALL VALUES" as a single group. Defaults to True.
Raises:
ValueError
Returns:
pandas DataFrame
"""
validtypes = ["classification", "regression"]
if pred_type not in validtypes:
raise ValueError(f"Summary report type must be one of {validtypes}")
if pred_type == "classification":
df = __classification_performance_report(X, y_true, y_pred, y_prob,
features, add_overview)
elif pred_type == "regression":
df = __regression_performance_report(X, y_true, y_pred,
features, add_overview)
#
df = df.round(sig_fig)
return df
def sort_report(report):
""" Sorts columns in standardized order
Args:
report (pd.DataFrame): any of the stratified reports produced by this
module
Returns:
pandas DataFrame: sorted report
"""
yname = y_cols()['disp_names']['yt']
yhname = y_cols()['disp_names']['yh']
head_names = ['Feature Name', 'Feature Value', 'Obs.',
f'{yname} Mean', f'{yhname} Mean']
head_cols = [c for c in head_names if c in report.columns]
tail_cols = sorted([c for c in report.columns if c not in head_cols])
return report[head_cols + tail_cols]
def summary_report(X, prtc_attr, y_true, y_pred, y_prob=None, flag_oor=True,
pred_type="classification", priv_grp=1, sig_fig:int=4,
**kwargs):
""" Generates a summary of fairness measures for a set of predictions
relative to their input data
Args:
X (array-like): Sample features
prtc_attr (array-like, named): Values for the protected attribute
(note: protected attribute may also be present in X)
y_true (array-like, 1-D): Sample targets
y_pred (array-like, 1-D): Sample target predictions
y_prob (array-like, 1-D): Sample target probabilities. Defaults to None.
flag_oor (bool): if true, will apply flagging function to highlight
fairness metrics which are considered to be outside the "fair" range
(Out Of Range). Defaults to True.
pred_type (str, optional): One of "classification" or "regression".
Defaults to "classification".
priv_grp (int): Specifies which label indicates the privileged
group. Defaults to 1.
Raises:
ValueError
Returns:
pandas DataFrame
"""
validtypes = ["classification", "regression"]
if pred_type not in validtypes:
raise ValueError(f"Summary report type must be one of {validtypes}")
if pred_type == "classification":
df = __classification_summary(X=X, prtc_attr=prtc_attr, y_true=y_true,
y_pred=y_pred, y_prob=y_prob,
priv_grp=priv_grp, **kwargs)
elif pred_type == "regression":
df = __regression_summary(X=X, prtc_attr=prtc_attr, y_true=y_true,
y_pred=y_pred, priv_grp=priv_grp, **kwargs)
#
if flag_oor:
df = flag(df, sig_fig=sig_fig)
else:
df = df.round(sig_fig)
return df
''' Private Functions '''
@format_errwarn
def __apply_featureGroups(features, df, func, *args):
""" Iteratively applies a function across groups of each stratified feature,
collecting errors and warnings to be displayed succinctly after processing
Args:
features (list): columns of df to be iteratively analyzed
df (pd.DataFrame): data to be analyzed
func (function): a function accepting *args and returning a dictionary
Returns:
pandas DataFrame: set of results for each feature-value
"""
#
errs = {}
warns = {}
res = []
for f in features:
# Data are expected in string format
with catch_warnings(record=True) as w:
simplefilter("always")
try:
grp = df.groupby(f)
grp_res = grp.apply(lambda x: pd.Series(func(x, *args)))
except BaseException as e:
errs[f] = e
continue
if len(w) > 0:
warns[f] = w
grp_res = grp_res.reset_index().rename(columns={f: 'Feature Value'})
grp_res.insert(0, 'Feature Name', f)
res.append(grp_res)
if len(res) == 0:
results = pd.DataFrame(columns=['Feature Name', 'Feature Value'])
else:
results = pd.concat(res, ignore_index=True)
return results, errs, warns
@format_errwarn
def __apply_biasGroups(features, df, func, yt, yh):
""" Iteratively applies a function across groups of each stratified feature,
collecting errors and warnings to be displayed succinctly after processing.
Args:
features (list): columns of df to be iteratively analyzed
df (pd.DataFrame): data to be analyzed
func (function): a function accepting two array arguments for comparison
(selected from df as yt and yh), as well as a pa_name (str) and
priv_grp (int) which will be set by __apply_biasGroups. This function
must return a dictionary.
yt (string): name of column found in df containing target values
yh (string): name of column found in df containing predicted values
Returns:
pandas DataFrame: set of results for each feature-value
"""
#
errs = {}
warns = {}
pa_name = 'prtc_attr'
res = []
for f in features:
df[f] = df[f].astype(str)
vals = sorted(df[f].unique().tolist())
# AIF360 can't handle float types
for v in vals:
df[pa_name] = 0
df.loc[df[f].eq(v), pa_name] = 1
if v != "nan":
df.loc[df[f].eq("nan"), pa_name] = np.nan
# Nothing to measure if only one value is present (other than nan)
if df[pa_name].nunique() == 1:
continue
# Data are expected in string format
with catch_warnings(record=True) as w:
simplefilter("always")
subset = df.loc[df[pa_name].notnull(),
[pa_name, yt, yh]].set_index(pa_name)
try:
#
grp_res = func(subset[yt], subset[yh], pa_name, priv_grp=1)
except BaseException as e:
errs[f] = e
continue
if len(w) > 0:
warns[f] = w
grp_res = | pd.DataFrame(grp_res, index=[0]) | pandas.DataFrame |
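All of the stratified reports above reduce to the same pattern: group by one feature, apply a dict-returning summary, and stack the results with the feature name attached. A minimal standalone version of that pattern with made-up data:

import pandas as pd

df = pd.DataFrame({"sex": ["F", "F", "M"], "y": [1.0, 3.0, 2.0]})

def summarize(x):
    return {"Obs.": x.shape[0], "y Mean": x["y"].mean()}

res = df.groupby("sex").apply(lambda x: pd.Series(summarize(x)))
res = res.reset_index().rename(columns={"sex": "Feature Value"})
res.insert(0, "Feature Name", "sex")
print(res)   # one row per feature value, plus the feature name column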
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
expected = pd.Series([0.2, 0.7, 1.2, 1.6, 2., 2.5], index=expected_index)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0, 0, 0, 20, 20, 20], 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 0, 0, 4, 4, 4], 'y', [50], [2]),
# invalid axis
pytest.param([0, 0, 0, 4, 4, 4], 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic(site_metadata, interval_label, obs_values,
axis, constant_values, expected_values):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='5min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 11 + [4] * 11, 'y', [50], [2]),
# invalid axis
pytest.param([0] * 11 + [4] * 11, 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
# insufficient observation data
pytest.param([5.3, 7.3, 1.4] * 4, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([], 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([None]*10, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic_timeofday(site_metadata, obs_values, axis,
constant_values, expected_values):
tz = 'UTC'
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_end = pd.Timestamp('20190513T0900', tz=tz)
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("data_end,forecast_start", [
# no timezone
(pd.Timestamp("20190513T0900"), pd.Timestamp("20190514T0900")),
# same timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# different timezone
(
pd.Timestamp("20190513T0200", tz="US/Pacific"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# obs timezone, but no fx timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900")
),
# no obs timezone, but fx timezone
(
pd.Timestamp("20190513T0900"),
pd.Timestamp("20190514T0900", tz="UTC")
),
])
def test_persistence_probabilistic_timeofday_timezone(site_metadata, data_end,
forecast_start):
obs_values = [0] * 11 + [20] * 11
axis, constant_values, expected_values = 'x', [10, 20], [50, 100]
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_end = forecast_start + pd.Timedelta("1h")
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
# if forecast without timezone, then use obs timezone
if data.index.tzinfo is not None and forecast_start.tzinfo is None:
expected_index = expected_index.tz_localize(data.index.tzinfo)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 15 + [20] * 15, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 15 + [4] * 15, 'y', [50], [2]),
([None] * 30, 'y', [50], [None]),
([0] * 10 + [None] * 10 + [20] * 10, 'x', [10, 20], [50, 100]),
([0] * 10 + [None] * 10 + [4] * 10, 'y', [50], [2]),
])
def test_persistence_probabilistic_resampling(
site_metadata,
interval_label,
obs_values, axis,
constant_values,
expected_values
):
tz = 'UTC'
interval_length = '1min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
# all observations 9-10 each day.
# This index is for (09:00, 10:00] (interval_label=ending), but subtract
# 30 minutes for [09:00, 10:00) (interval_label=beginning)
PROB_PERS_TOD_OBS_INDEX = pd.DatetimeIndex([
'2019-04-21 09:30:00+00:00', '2019-04-21 10:00:00+00:00',
'2019-04-22 09:30:00+00:00', '2019-04-22 10:00:00+00:00',
'2019-04-23 09:30:00+00:00', '2019-04-23 10:00:00+00:00',
'2019-04-24 09:30:00+00:00', '2019-04-24 10:00:00+00:00',
'2019-04-25 09:30:00+00:00', '2019-04-25 10:00:00+00:00',
'2019-04-26 09:30:00+00:00', '2019-04-26 10:00:00+00:00',
'2019-04-27 09:30:00+00:00', '2019-04-27 10:00:00+00:00',
'2019-04-28 09:30:00+00:00', '2019-04-28 10:00:00+00:00',
'2019-04-29 09:30:00+00:00', '2019-04-29 10:00:00+00:00',
'2019-04-30 09:30:00+00:00', '2019-04-30 10:00:00+00:00',
'2019-05-01 09:30:00+00:00', '2019-05-01 10:00:00+00:00',
'2019-05-02 09:30:00+00:00', '2019-05-02 10:00:00+00:00',
'2019-05-03 09:30:00+00:00', '2019-05-03 10:00:00+00:00',
'2019-05-04 09:30:00+00:00', '2019-05-04 10:00:00+00:00',
'2019-05-05 09:30:00+00:00', '2019-05-05 10:00:00+00:00',
'2019-05-06 09:30:00+00:00', '2019-05-06 10:00:00+00:00',
'2019-05-07 09:30:00+00:00', '2019-05-07 10:00:00+00:00',
'2019-05-08 09:30:00+00:00', '2019-05-08 10:00:00+00:00',
'2019-05-09 09:30:00+00:00', '2019-05-09 10:00:00+00:00',
'2019-05-10 09:30:00+00:00', '2019-05-10 10:00:00+00:00',
'2019-05-11 09:30:00+00:00', '2019-05-11 10:00:00+00:00',
'2019-05-12 09:30:00+00:00', '2019-05-12 10:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
@pytest.mark.parametrize('obs_interval_label_index', [
('beginning', PROB_PERS_TOD_OBS_INDEX - pd.Timedelta('30min')),
('ending', PROB_PERS_TOD_OBS_INDEX)
])
@pytest.mark.parametrize('fx_interval_label_index', [
('beginning', pd.DatetimeIndex(['20190514T0900Z'], freq='1h')),
('ending', pd.DatetimeIndex(['20190514T1000Z'], freq='1h'))
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
# intervals always average to 10 if done properly, but 0 or 20 if
# done improperly
([0, 20] * 22, 'x', [10, 20], [100., 100.]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 4] * 22, 'y', [50], [2.]),
# works with nan
([None, 4] * 22, 'y', [50], [4.]),
([0.] + [None] * 42 + [4.], 'y', [50], [2.]),
# first interval averages to 0, last to 20, else nan
([0.] + [None] * 42 + [20.], 'x', [10, 20], [50., 100.]),
])
def test_persistence_probabilistic_timeofday_resample(
site_metadata,
obs_values,
axis,
constant_values,
expected_values,
obs_interval_label_index,
fx_interval_label_index
):
obs_interval_label, obs_index = obs_interval_label_index
fx_interval_label, fx_index = fx_interval_label_index
tz = 'UTC'
observation = default_observation(
site_metadata,
interval_length='30min',
interval_label=obs_interval_label
)
data_start = pd.Timestamp('20190421T0900', tz=tz)
data_end = pd.Timestamp('20190512T1000', tz=tz)
data = pd.Series(obs_values, index=obs_index, dtype=float)
# forecast 9am - 10am, but label will depend on inputs
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = fx_index
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, fx_interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for expected, fx in zip(expected_values, forecasts):
pd.testing.assert_series_equal(
fx,
pd.Series(expected, index=expected_index)
)
PROB_PERS_TOD_OBS_INDEX_2H = PROB_PERS_TOD_OBS_INDEX.union(
PROB_PERS_TOD_OBS_INDEX + pd.Timedelta('1h')
)
@pytest.mark.parametrize('obs_interval_label_index', [
('beginning', PROB_PERS_TOD_OBS_INDEX_2H - pd.Timedelta('30min')),
('ending', PROB_PERS_TOD_OBS_INDEX_2H)
])
@pytest.mark.parametrize('fx_interval_label_index', [
(
'beginning',
pd.DatetimeIndex(['20190514T0900Z', '20190514T1000Z'], freq='1h')
),
(
'ending',
pd.DatetimeIndex(['20190514T1000Z', '20190514T1100Z'], freq='1h')
)
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# first interval averages to 0, last to 20, else nan
([0.] + [None] * 86 + [20.], 'x', [10, 20], [[100., 0.], [100., 100.]]),
# no valid observations in first forecast hour
(
[None, None, 20., 20.] * 22,
'x',
[10, 20],
[[None, 0.], [None, 100.]]
),
])
def test_persistence_probabilistic_timeofday_resample_2h(
site_metadata,
obs_values,
axis,
constant_values,
expected_values,
obs_interval_label_index,
fx_interval_label_index
):
obs_interval_label, obs_index = obs_interval_label_index
fx_interval_label, fx_index = fx_interval_label_index
tz = 'UTC'
observation = default_observation(
site_metadata,
interval_length='30min',
interval_label=obs_interval_label
)
data_start = pd.Timestamp('20190421T0900', tz=tz)
data_end = pd.Timestamp('20190512T1100', tz=tz)
data = pd.Series(obs_values, index=obs_index, dtype=float)
# forecast 9am - 11am, but label will depend on inputs
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1100', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = fx_index
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, fx_interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for expected, fx in zip(expected_values, forecasts):
pd.testing.assert_series_equal(
fx,
pd.Series(expected, index=expected_index)
)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize('axis', ['x', 'y'])
def test_persistence_probabilistic_no_data(
site_metadata, interval_label, axis):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = | pd.Timestamp('20190513 1200', tz=tz) | pandas.Timestamp |
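The two `axis` conventions exercised in these probabilistic-persistence tests are just two queries against the empirical distribution of the (resampled) observations; roughly, and with illustrative code rather than the library's internals:

import numpy as np
from scipy import stats

obs = np.array([0, 0, 0, 20, 20, 20], dtype=float)

# axis='x': constant_values are variable values, forecasts are percentiles [%]
print(stats.percentileofscore(obs, 10, kind="weak"))   # 50.0  -> share of obs <= 10
print(stats.percentileofscore(obs, 20, kind="weak"))   # 100.0 -> share of obs <= 20

# axis='y': constant_values are percentiles [%], forecasts are variable values
print(np.percentile(obs, 50))                          # 10.0 (linear interpolation)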
"""dynamic user-input-responsive part of mood, and mood graphs"""
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.signal import lsim, lti
from scipy.signal.ltisys import StateSpaceContinuous
from tqdm.autonotebook import tqdm
from IPython.display import display
from persistence.response_cache import (
ResponseCache,
UserInputIdentifier,
)
from feels.mood import (
random_mood_at_pst_datetime,
logit_diff_to_pos_sent,
pos_sent_to_logit_diff,
)
from util.past import MILESTONE_TIMES
from util.times import now_pst, fromtimestamp_pst
MOOD_IMAGE_DIR = "data/mood_images/"
STEP_SEC = 30 * 1
TAU_SEC = 3600 * 12
TAU_SEC_2ND = 60 * 60
WEIGHTED_AVG_START_TIME = | pd.Timestamp("2021-01-04 09:10:00") | pandas.Timestamp |
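The `lti`/`lsim` imports and the `TAU_SEC` constants above set up a linear-time-invariant smoothing of mood inputs; a minimal sketch of driving a first-order lag with that time constant (illustrative only — the real system matrices are built elsewhere in this module):

import numpy as np
from scipy.signal import lti, lsim

tau = 3600 * 12                                 # TAU_SEC
system = lti([1.0], [tau, 1.0])                 # H(s) = 1 / (tau*s + 1), a first-order lag
t = np.arange(0, 24 * 3600, 30, dtype=float)    # one day, sampled every STEP_SEC seconds
u = np.zeros_like(t)
u[t >= 6 * 3600] = 1.0                          # a unit "mood input" switched on at 6am
t_out, y, _ = lsim(system, U=u, T=t)            # y relaxes toward 1 with a ~12 h time constant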
import datetime
import os
import shutil
import unittest
from copy import deepcopy
from typing import Optional, Tuple, Any, Callable, Dict, Sequence, List
from unittest.mock import patch
import pandas as pd
from pandas.testing import assert_frame_equal
from datacode.models.column.column import Column
from datacode.models.dtypes.str_type import StringType
from datacode.models.source import DataSource
from datacode.models.variables import Variable
from datacode.models.variables.expression import Expression
from datacode import Transform, DataOutputNotSafeException, Index, ColumnIndex
from tests.utils import GENERATED_PATH, assert_frame_not_equal
def transform_cell_data_func(col: Column, variable: Variable, cell: Any) -> Any:
if isinstance(cell, str):
return cell
return cell + 1
def transform_series_data_func(col: Column, variable: Variable, series: pd.Series) -> pd.Series:
return series + 1
def transform_dataframe_data_func(col: Column, variable: Variable, df: pd.DataFrame) -> pd.DataFrame:
df[variable.name] = df[variable.name] + 1
return df
def transform_source_data_func(col: Column, variable: Variable, source: DataSource) -> DataSource:
# Extra unnecessary logic to access source.columns to test looking up columns
cols = source.columns
for this_col in cols:
if not this_col.variable.key == col.variable.key:
continue
if not variable.dtype.is_numeric:
continue
source.df[variable.name] = source.df[variable.name] + 1
return source
def expression_series_func(cols: Sequence[Column]) -> pd.Series:
return cols[0].series + cols[1].series
class SourceTest(unittest.TestCase):
test_df = pd.DataFrame(
[
(1, 2, 'd'),
(3, 4, 'd'),
(5, 6, 'e')
],
columns=['a', 'b', 'c']
)
expect_df_no_rename_a_plus_1 = pd.DataFrame(
[
(2, 2, 'd'),
(4, 4, 'd'),
(6, 6, 'e')
],
columns=['a', 'b', 'c']
)
expect_df_no_rename_a_plus_2 = pd.DataFrame(
[
(3, 2, 'd'),
(5, 4, 'd'),
(7, 6, 'e')
],
columns=['a', 'b', 'c']
)
expect_loaded_df_rename_only = pd.DataFrame(
[
(1, 2, 'd'),
(3, 4, 'd'),
(5, 6, 'e')
],
columns=['A', 'B', 'C'],
).convert_dtypes()
expect_loaded_df_rename_only_indexed_c = expect_loaded_df_rename_only.set_index('C')
expect_loaded_df_rename_only_a_indexed_c = expect_loaded_df_rename_only_indexed_c.drop('B', axis=1)
expect_loaded_df_rename_only_a_b = pd.DataFrame(
[
(1, 2,),
(3, 4,),
(5, 6,)
],
columns=['A', 'B']
).convert_dtypes()
expect_loaded_df_with_transform = pd.DataFrame(
[
(2, 3, 'd'),
(4, 5, 'd'),
(6, 7, 'e')
],
columns=['A_1', 'B_1', 'C']
).convert_dtypes()
expect_loaded_df_with_a_and_a_transform = pd.DataFrame(
[
(1, 2, 2, 'd'),
(3, 4, 4, 'd'),
(5, 6, 6, 'e')
],
columns=['A', 'A_1', 'B', 'C']
).convert_dtypes()
expect_loaded_df_with_a_transform_and_a = pd.DataFrame(
[
(2, 1, 2, 'd'),
(4, 3, 4, 'd'),
(6, 5, 6, 'e')
],
columns=['A_1', 'A', 'B', 'C']
).convert_dtypes()
expect_loaded_df_with_transform_only_a_b = pd.DataFrame(
[
(2, 3,),
(4, 5,),
(6, 7,)
],
columns=['A_1', 'B_1']
).convert_dtypes()
expect_loaded_df_with_transform_and_a_pre_transformed = pd.DataFrame(
[
(1, 3, 'd'),
(3, 5, 'd'),
(5, 7, 'e')
],
columns=['A_1', 'B_1', 'C']
).convert_dtypes()
expect_loaded_df_with_calculated = pd.DataFrame(
[
(1, 2, 'd', 3),
(3, 4, 'd', 7),
(5, 6, 'e', 11)
],
columns=['A', 'B', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculated_c_d_only = pd.DataFrame(
[
('d', 3),
('d', 7),
('e', 11)
],
columns=['C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculated_transformed = pd.DataFrame(
[
(1, 2, 'd', 4),
(3, 4, 'd', 8),
(5, 6, 'e', 12)
],
columns=['A', 'B', 'C', 'D_1'],
).convert_dtypes()
expect_loaded_df_with_calculated_and_calculated_transformed = pd.DataFrame(
[
(1, 2, 'd', 3, 4),
(3, 4, 'd', 7, 8),
(5, 6, 'e', 11, 12)
],
columns=['A', 'B', 'C', 'D', 'D_1'],
).convert_dtypes()
expect_loaded_df_with_calculated_transformed_and_calculated = pd.DataFrame(
[
(1, 2, 'd', 4, 3),
(3, 4, 'd', 8, 7),
(5, 6, 'e', 12, 11)
],
columns=['A', 'B', 'C', 'D_1', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculate_on_transformed_before_transform = pd.DataFrame(
[
(2, 3, 'd', 3),
(4, 5, 'd', 7),
(6, 7, 'e', 11)
],
columns=['A_1', 'B_1', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculate_on_transformed_after_transform = pd.DataFrame(
[
(2, 3, 'd', 5),
(4, 5, 'd', 9),
(6, 7, 'e', 13)
],
columns=['A_1', 'B_1', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_with_calculate_on_transformed_before_and_after_transform = pd.DataFrame(
[
(2, 3, 'd', 4),
(4, 5, 'd', 8),
(6, 7, 'e', 12)
],
columns=['A_1', 'B_1', 'C', 'D'],
).convert_dtypes()
expect_loaded_df_categorical = expect_loaded_df_rename_only.copy()
expect_loaded_df_categorical['C'] = expect_loaded_df_categorical['C'].astype('category')
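    # The four Transform objects below apply the same +1 / "_1"-suffix transformation, each through
    # a different data_func_target (cell, series, dataframe, source), so every code path can be
    # checked against the same expected output.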
transform_name_func = lambda x: f'{x}_1'
transform_cell = Transform('add_one_cell', transform_name_func, transform_cell_data_func, data_func_target='cell')
transform_series = Transform('add_one_series', transform_name_func, transform_series_data_func, data_func_target='series')
transform_dataframe = Transform('add_one_df', transform_name_func, transform_dataframe_data_func, data_func_target='dataframe')
transform_source = Transform('add_one_source', transform_name_func, transform_source_data_func, data_func_target='source')
csv_path = os.path.join(GENERATED_PATH, 'data.csv')
def setup_method(self, *args, **kwargs):
if os.path.exists(GENERATED_PATH):
shutil.rmtree(GENERATED_PATH)
os.makedirs(GENERATED_PATH)
def teardown_method(self, *args, **kwargs):
shutil.rmtree(GENERATED_PATH)
def create_source(self, **kwargs) -> DataSource:
config_dict = dict(
df=self.test_df,
location=self.csv_path,
)
config_dict.update(kwargs)
return DataSource(**config_dict)
def get_transform(self, func_type: str) -> Transform:
if func_type == 'cell':
return self.transform_cell
elif func_type == 'series':
return self.transform_series
elif func_type == 'dataframe':
return self.transform_dataframe
elif func_type == 'source':
return self.transform_source
else:
raise ValueError(
f'could not look up func_type {func_type}, should be one of cell, series, dataframe, source')
def create_csv(self, df: Optional[pd.DataFrame] = None, **to_csv_kwargs):
if df is None:
df = self.test_df
df.to_csv(self.csv_path, index=False, **to_csv_kwargs)
def get_transform_dict(self, transform_data: str = '', apply_transforms: bool = True):
if transform_data:
transform = self.get_transform(transform_data)
transform_dict = dict(
available_transforms=[transform],
)
if apply_transforms:
transform_dict['applied_transforms'] = [transform]
else:
transform_dict = {}
return transform_dict
def create_variables(self, transform_data: str = '', apply_transforms: bool = True) -> Tuple[Variable, Variable, Variable]:
transform_dict = self.get_transform_dict(transform_data=transform_data, apply_transforms=apply_transforms)
a = Variable('a', 'A', dtype='int', **transform_dict)
b = Variable('b', 'B', dtype='int', **transform_dict)
c = Variable('c', 'C', dtype='str')
return a, b, c
def create_columns(self, transform_data: str = '', apply_transforms: bool = True) -> List[Column]:
a, b, c = self.create_variables(transform_data=transform_data, apply_transforms=apply_transforms)
ac = Column(a, 'a')
bc = Column(b, 'b')
cc = Column(c, 'c')
return [
ac,
bc,
cc
]
def create_c_index(self) -> Index:
c_index = Index('c', dtype=StringType(categorical=True))
return c_index
def create_variables_and_c_colindex(self, transform_data: str = '', apply_transforms: bool = True
) -> Tuple[List[Variable], ColumnIndex]:
a, b, c = self.create_variables(transform_data=transform_data, apply_transforms=apply_transforms)
c_index = self.create_c_index()
c_col_index = ColumnIndex(c_index, [c])
return [a, b, c], c_col_index
def create_indexed_columns(self, transform_data: str = '', apply_transforms: bool = True) -> List[Column]:
(a, b, c), c_col_index = self.create_variables_and_c_colindex(
transform_data=transform_data, apply_transforms=apply_transforms
)
ac = Column(a, 'a', indices=[c_col_index])
bc = Column(b, 'b', indices=[c_col_index])
cc = Column(c, 'c')
return [
ac,
bc,
cc
]
class TestCreateSource(SourceTest):
def test_create_source_from_df(self):
ds = self.create_source(location=None)
assert_frame_equal(ds.df, self.test_df)
def test_create_source_from_file_path(self):
self.create_csv()
ds = self.create_source(df=None)
assert_frame_equal(ds.df, self.test_df)
def test_create_source_with_columns(self):
all_cols = self.create_columns()
ds = self.create_source(location=None, columns=all_cols)
assert ds.columns == all_cols
def test_graph(self):
ds = self.create_source(location=None)
# Need to do a better job with this test, see TestDataMergePipeline.test_graph
ds.graph
class TestLoadSource(SourceTest):
def test_load_with_columns(self):
self.create_csv()
all_cols = self.create_columns()
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only)
def test_load_with_columns_subset(self):
self.create_csv()
all_cols = self.create_columns()
all_vars = self.create_variables()
var_subset = [var for var in all_vars if var.key != 'c']
ds = self.create_source(df=None, columns=all_cols, load_variables=var_subset)
assert_frame_equal(ds.df, self.expect_loaded_df_rename_only_a_b)
    def test_load_with_repeated_variables_different_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
# First with original variable first, then transformation
load_variables = [
a,
a.add_one_cell(),
b,
c,
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
assert_frame_equal(ds.df, self.expect_loaded_df_with_a_and_a_transform)
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
# Now with transformation first, then original variable
load_variables = [
a.add_one_cell(),
a,
b,
c,
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
assert_frame_equal(ds.df, self.expect_loaded_df_with_a_transform_and_a)
def test_load_with_repeated_variable_names_raises_error(self):
self.create_csv()
all_cols = self.create_columns()
all_cols.append(deepcopy(all_cols[2]))
with self.assertRaises(ValueError) as cm:
ds = self.create_source(df=None, columns=all_cols)
exc = cm.exception
assert 'variable name C repeated in load variables' in str(exc)
def test_with_columns_and_load_variables_with_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell', apply_transforms=False)
a, b, c = self.create_variables(transform_data='cell', apply_transforms=False)
load_variables = [
a.add_one_cell(),
b.add_one_cell(),
c
]
ds = self.create_source(df=None, columns=all_cols, load_variables=load_variables)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_cell(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_series(self):
self.create_csv()
all_cols = self.create_columns(transform_data='series')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_dataframe(self):
self.create_csv()
all_cols = self.create_columns(transform_data='dataframe')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_and_transform_source(self):
self.create_csv()
all_cols = self.create_columns(transform_data='source')
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform)
def test_load_with_columns_transforms_and_pre_applied_transforms(self):
self.create_csv()
all_cols = self.create_columns(transform_data='cell')
a, b, c = self.create_variables(transform_data='cell')
all_cols[0] = Column(a, 'a', applied_transform_keys=['add_one_cell'])
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_with_transform_and_a_pre_transformed)
def test_load_with_categorical(self):
self.create_csv()
all_cols = self.create_columns()
a, b, c = self.create_variables()
all_cols[2] = Column(c, 'c', dtype=StringType(categorical=True))
ds = self.create_source(df=None, columns=all_cols)
assert_frame_equal(ds.df, self.expect_loaded_df_categorical)
def test_load_with_datetime(self):
test_df = self.test_df.copy()
test_df['d'] = pd.to_datetime('1/1/2000')
self.create_csv(df=test_df)
expect_df = self.expect_loaded_df_rename_only.copy()
        expect_df['Date'] = pd.to_datetime('1/1/2000')
from flask import render_template, request, session, redirect, url_for, jsonify
from app import app
from amshelper import AmsHelper
from datetime import datetime, timedelta
import pandas as pd
from dateutil.relativedelta import relativedelta
import json
@app.route('/')
@app.route('/index', methods=['GET'])
def index():
try:
model = session.pop('model')
except KeyError:
model = {
'player_visibility': 'none',
'error_message_visibility': 'none',
'streaming_url': '',
'asset_name': ''
}
return render_template('index.html', title='AMS Asset Player', model=model)
@app.route('/index', methods=['POST'])
def get_stream_for():
asset = request.form.get('amsAssetName')
streaming_url = AmsHelper().get_streaming_url(asset)
model = {
'player_visibility': 'display',
'error_message_visibility': 'none',
'streaming_url': streaming_url,
'asset_name': asset
}
session['model'] = model
return redirect(url_for('index'))
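# Timeline drill-down endpoint: 'precision' (year, month, day, full) determines how the
# start/end window is derived before asking AMS for the available media time ranges.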
@app.route('/timeranges', methods=['GET'])
def get_time_ranges():
asset = request.args.get('assetName')
precision = request.args.get('precision')
start_time = request.args.get('startTime')
    dt_start = datetime.now()
    dt_end = datetime.now()
if precision == 'year':
start_time = '2019'
end_time = str(datetime.now().year)
elif precision == 'month':
year = int(start_time)
dt_start = datetime(year, 1, 1, 0, 0, 0, 0)
dt_end = datetime(year, 12, 1, 0, 0, 0, 0)
start_time = dt_start.strftime('%Y-%m')
end_time = dt_end.strftime('%Y-%m')
elif precision == 'day':
dt_start = pd.to_datetime(start_time)
dt_end = dt_start + relativedelta(months=1) - relativedelta(days=1)
start_time = dt_start.strftime('%Y-%m-%d')
end_time = dt_end.strftime('%Y-%m-%d')
elif precision == 'full':
dt_start = pd.to_datetime(start_time)
dt_end = dt_start + relativedelta(days=1)
start_time = dt_start.strftime("%Y-%m-%dT00:00:00")
end_time = dt_start.strftime("%Y-%m-%dT23:59:59")
available_time_ranges = AmsHelper().get_available_media_timeranges(
asset,
precision,
start_time,
end_time
)
result_string = AmsHelper.get_available_mediatime(available_time_ranges, precision)
return jsonify(result_string)
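# Streaming-URL endpoint: resolves the locator URL for the asset; the precision/start/end query
# parameters are read, presumably to scope 'full'-precision playback to the selected window.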
@app.route('/streamingurl', methods=['GET'])
def get_streaming_url():
asset = request.args.get('assetName')
precision = request.args.get('precision')
start_time = request.args.get('startTime')
end_time = request.args.get('endTime')
streaming_url = AmsHelper().get_streaming_url(asset)
if precision == 'full':
dt_start = datetime.now()
        dt_start = pd.to_datetime(start_time)
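    # Assumed completion (sketch): hand the locator URL back to the caller; any time-window
    # scoping of the URL for 'full' precision is omitted here.
    return jsonify(streaming_url)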
import os
import pandas as pd
import datetime
import time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.lines as mlines
import numpy as np
import sys
import random
from collections import OrderedDict
class Vis:
# TODO: Move all drawing helper functions to own file.
# TODO: TESTME: Write tests for this module
    def get_color_from_label(self, label, color):
        return color
    def title_from_list(self, act_selection):
        result=''
        for act in act_selection:
            result+=act+'_'
        return result
def plot_newline(self, p1, p2):
ax = plt.gca()
xmin, xmax = ax.get_xbound()
if(p2[0] == p1[0]):
xmin = xmax = p1[0]
ymin, ymax = ax.get_ybound()
else:
ymax = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmax-p1[0])
ymin = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmin-p1[0])
l = mlines.Line2D([xmin,xmax], [ymin,ymax], color='grey', linestyle='--', linewidth=1)
ax.add_line(l)
return l
def draw_traces(self, data_selection, ax, draw_skylines=None):
unique_trace = data_selection['case'].unique().tolist()
colormapt = cm.gist_ncar
trace_colorlist = [colors.rgb2hex(colormapt(i)) for i in np.linspace(0, 0.9, len(unique_trace))]
trace_legend = dict(zip(unique_trace, trace_colorlist))
for j, k in enumerate(data_selection['case'].drop_duplicates()):
current = data_selection[data_selection['case']==k]
l = k
c = trace_legend.get(l)
if draw_skylines:
skyline = self.get_skyline_points(current)
ax.plot(skyline['num_start'], skyline['num_end'], label='skyline '+k, zorder=0, color=c)
else:
ax.plot(current['num_start'], current['num_end'], label='trace '+k, zorder=0, color=c)
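    # Draws dashed reference lines through a selected (start, end) point; in duration plots the
    # diagonals partition the plane into Allen-style interval relations relative to that point.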
    def draw_allen_lines(self, allen_point, ax, yax, duration_plot=None):
x = allen_point['num_start'].values[0]
y = allen_point['num_end'].values[0]
if duration_plot:
ax.axvline(x, c='grey', linewidth=1, linestyle='--')
ax.axvline(x+y, c='grey', linewidth=1, linestyle='--')
            self.plot_newline([x,y],[x+2*y,-y])
            self.plot_newline([x-y,y],[x+y,-y])
else:
ax.plot([x,x],[x,yax],'k-', c='grey', linewidth=1, linestyle='--')
ax.plot([y,y],[y,yax],'k-', c='grey', linewidth=1, linestyle='--')
ax.plot([0,x],[x,x],'k-', c='grey', linewidth=1, linestyle='--')
ax.plot([0,y],[y,y],'k-', c='grey', linewidth=1, linestyle='--')
def plot_point_transformer(self, title, data_selection, activity=None, traces=None, allen_point=None, size=None,
duration_plot=None, draw_skylines=None, output_path=None, show_plot=None):
def get_time_list_from_seconds(list):
result = []
for item in list:
if item < 0:
result.append('')
else:
result.append(datetime.timedelta(seconds=item))
return result
def sort_dict(d):
items = [[k, v] for k, v in sorted(d.items(), key=lambda x: x[0])]
for item in items:
if isinstance(item[1], dict):
item[1] = sort_dict(item[1])
return OrderedDict(items)
def multiline_text(text, max_in_line):
many_lines = []
result=''
i=0
while len(text)>max_in_line and i==0:
many_lines.append(text[0:max_in_line+1])
text = text[max_in_line+1:]
if len(text):
many_lines.append(text)
for item in many_lines:
result += item + '\n'
return result
fig, ax = plt.subplots()
if size:
fig.set_size_inches(18.5, 18.5)
#colormap = cm.nipy_spectral
#colormap = cm.prism
#colormap = cm.tab20
colormap = cm.hsv
#colormap = cm.gist_rainbow
#colormap = cm.gist_ncar
unique_act = sorted(data_selection['activity'].unique().tolist())
unique_trace = data_selection['case'].unique().tolist()
colorlist = [colors.rgb2hex(colormap(i)) for i in np.linspace(0, 0.9, len(unique_act))]
legend = dict(zip(unique_act, colorlist))
colorby = 'activity'
if activity:
data_selection = data_selection.loc[data_selection['activity']==activity].reset_index()
colorlist = [colors.rgb2hex(colormap(i)) for i in np.linspace(0, 0.9, len(unique_trace))]
legend = dict(zip(unique_trace, colorlist))
colorby = 'case'
elif traces:
data_selection = data_selection.loc[data_selection['case'].isin(traces)].reset_index()
for i, e in enumerate(data_selection['num_start']):
x = data_selection['num_start'][i]
y = data_selection['num_end'][i]
l = data_selection[colorby][i]
c = legend.get(l)
ax.scatter(x, y, label=l, s=50, linewidth=0.1, c=c)
yin, yax= ax.get_ylim()
xin, xax= ax.get_xlim()
ax.set_xlim(xmin=0)
ax.set_ylim(ymin=0)
if not duration_plot:
#Draw diagonal
self.plot_newline([0,0],[max(xax,yax),max(xax,yax)])
ax.set_ylabel('End time')
else:
ax.set_ylabel('Duration')
if traces:
self.draw_traces(data_selection, ax, draw_skylines=draw_skylines)
# if not allen_point is None :# Weird if statement because of maybe empty object or dataframe
# draw_allen_lines(allen_point, ax, yax, duration_plot=duration_plot)
ax.legend()
ax.set_xlabel('Start time')
xlocs, labels = plt.xticks()
ylocs, labels = plt.yticks()
plt.xticks(xlocs[1:], get_time_list_from_seconds(xlocs[1:]),rotation='vertical')
plt.yticks(ylocs[1:], get_time_list_from_seconds(ylocs[1:]))
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
by_label = sort_dict(by_label)
#if len(by_label)>50:
# plt.legend(by_label.values(), by_label.keys(), loc='center left', bbox_to_anchor=(1, 0.5), ncol=2)
#else:
plt.legend(by_label.values(), by_label.keys(), loc='center left', bbox_to_anchor=(1, 0.5))
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title(multiline_text(title, 175))
if output_path:
print('Saving in ',output_path)
fig.savefig(output_path, bbox_inches='tight')
if show_plot:
plt.show()
plt.close(fig)
return fig
    def get_duration(self, start_time, end_time):
start = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
end = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
duration = abs(end - start)
return duration
#get_duration(ex['timestamp'][10],ex['timestamp'][1])
def get_data_selection_avgtrace(self, df):
def get_average_times(group):
def avg_datetime(series):
averages = (series.sum())/len(series)
#averages = time.strftime('%H:%M:%S', time.gmtime(averages))
return averages
            group['average_start'] = time.strftime('%H:%M:%S', time.gmtime(avg_datetime(group['num_start'])))
            group['average_end'] = time.strftime('%H:%M:%S', time.gmtime(avg_datetime(group['num_end'])))
            # Take the spread before num_end is collapsed to its group average below.
            group['std_num_end'] = group['num_end'].std()
            group['num_start'] = avg_datetime(group['num_start'])
            group['num_end'] = avg_datetime(group['num_end'])
return group
average_trace = df[['case','activity','num_start','num_end']].iloc[: , :]
average_trace = average_trace.groupby(['activity'])
average_trace = average_trace.apply(get_average_times)
average_trace = average_trace.drop_duplicates('activity', keep='first').reset_index()
average_trace['case'] = 'Average Case'
average_trace = average_trace[['activity','average_start', 'average_end','num_start','num_end', 'case', 'std_num_end']].sort_values(by=['num_start'])
return average_trace
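    # Skyline of a trace: keep the events whose start and end are both running maxima of the case
    # so far, i.e. the upper-right frontier plotted when draw_skylines is enabled.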
def get_skyline_points(self, df):
df = df.reset_index()
df.sort_values(by=['num_start'])
skyline = pd.DataFrame()
for unique_case in df['case'].unique():
max_x = []
max_y = []
activity = []
case = []
iter_case = df[df['case']==unique_case]
for i in range(len(iter_case)):
maxi = max(iter_case['num_start'][0:i+1].values.tolist())
mayi = max(iter_case['num_end'][0:i+1].values.tolist())
#print(e, maxi, mayi)
if maxi in iter_case[iter_case['num_end']==mayi]['num_start'].values:
max_x.append(maxi)
max_y.append(mayi)
activity.append(iter_case['activity'].iloc[i])
case.append(iter_case['case'].iloc[i])
skyline = pd.concat([skyline, pd.DataFrame({'num_start':max_x, 'num_end':max_y, 'activity': activity, 'case': case})])
skyline = skyline.drop_duplicates().reset_index()[['num_start','num_end','activity','case']]
return skyline
#first_case = snippet.loc[snippet['case']==snippet['case'][0]].reset_index()
#get_skyline_points(first_case).head()
def get_duration(self, start_time, end_time):
start = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
end = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
duration = abs(end - start)
return duration
#get_duration(ex['timestamp'][10],ex['timestamp'][1])
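    # Rebases every case onto its own earliest start_time ("zero point") so traces recorded at
    # different absolute dates can be overlaid on a single relative time axis.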
def get_relative_timestamps(self, df, exclude_tasks=[]):
def get_zero_points(group):
group['zero_point'] = group['start_time'].min()
return group
#WIP for failing test: relatived['zero_points']=relatived['start_time'].groupby(relatived['case']).transform('min')
relatived = df.copy()
#print('First timestamp in dataframe ', df['start_time'].min())
#print('Last timestamp in dataframe ',df['end_time'].max())
grouped = df.groupby(['case'])
grouped = grouped.apply(get_zero_points)
#print('Grouped:', len(grouped), 'columns', grouped.columns.tolist())
relatived = pd.merge(grouped, relatived, on = ['case', 'activity', 'start_time', 'end_time'], how = 'inner')
#print('Merged relatived:', len(relatived), 'columns', relatived.columns.tolist())
excluding = exclude_tasks
relatived['rel_start'] = relatived.apply(lambda row:
str(self.get_duration(str(row['zero_point']),
str(row['start_time']))), axis=1)
relatived['rel_end'] = relatived.apply(lambda row:
str(self.get_duration(str(row['zero_point']),
str(row['end_time']))), axis=1)
        # Numeric seconds are needed downstream (averaging, time.gmtime); the total_seconds()
        # conversion here is an assumed completion.
        relatived['num_start'] = list(pd.to_timedelta(relatived['rel_start'], errors="coerce").dt.total_seconds())
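        # Assumed completion sketch: mirror the conversion for rel_end and hand the frame back.
        relatived['num_end'] = list(pd.to_timedelta(relatived['rel_end'], errors="coerce").dt.total_seconds())
        return relatived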
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
df=pd.read_csv('train_HK6lq50.csv')
def train_data_preprocess(df,train,test):
df['trainee_engagement_rating'].fillna(value=1.0,inplace=True)
df['isage_null']=0
    df.loc[df.age.isnull(), 'isage_null'] = 1
df['age'].fillna(value=0,inplace=True)
#new cols actual_programs_enrolled and total_test_taken
total=train.append(test)
unique_trainee=pd.DataFrame(total.trainee_id.value_counts())
unique_trainee['trainee_id']=unique_trainee.index
value=[]
for i in unique_trainee.trainee_id:
value.append(len(total[total.trainee_id==i].program_id.unique()))
unique_trainee['actual_programs_enrolled']=value
dic1=dict(zip(unique_trainee['trainee_id'],unique_trainee['actual_programs_enrolled']))
df['actual_programs_enrolled']=df['trainee_id'].map(dic1).astype(int)
value=[]
for i in unique_trainee.trainee_id:
value.append(len(total[total.trainee_id==i].test_id.unique()))
unique_trainee['total_test_taken']=value
dic2=dict(zip(unique_trainee['trainee_id'],unique_trainee['total_test_taken']))
df['total_test_taken']=df['trainee_id'].map(dic2).astype(int)
#new col total_trainee_in_each_test
unique_test=pd.DataFrame(total.test_id.value_counts())
unique_test['test_id']=unique_test.index
value=[]
for i in unique_test.test_id:
value.append(len(total[total.test_id==i].trainee_id.unique()))
unique_test['total_trainee_in_each_test']=value
dic3=dict(zip(unique_test['test_id'],unique_test['total_trainee_in_each_test']))
df['total_trainee_in_each_test']=df['test_id'].map(dic3).astype(int)
#LABEL ENCODING
test_type=sorted(df['test_type'].unique())
test_type_mapping=dict(zip(test_type,range(1,len(test_type)+1)))
df['test_type_val']=df['test_type'].map(test_type_mapping).astype(int)
df.drop('test_type',axis=1,inplace=True)
program_type=sorted(df['program_type'].unique())
program_type_mapping=dict(zip(program_type,range(1,len(program_type)+1)))
df['program_type_val']=df['program_type'].map(program_type_mapping).astype(int)
df.drop('program_type',axis=1,inplace=True)
program_id=sorted(df['program_id'].unique())
program_id_mapping=dict(zip(program_id,range(1,len(program_id)+1)))
df['program_id_val']=df['program_id'].map(program_id_mapping).astype(int)
#df.drop('program_id',axis=1,inplace=True)
difficulty_level=['easy','intermediate','hard','vary hard']
difficulty_level_mapping=dict(zip(difficulty_level,range(1,len(difficulty_level)+1)))
df['difficulty_level_val']=df['difficulty_level'].map(difficulty_level_mapping).astype(int)
df.drop('difficulty_level',axis=1,inplace=True)
education=['No Qualification','High School Diploma','Matriculation','Bachelors','Masters']
educationmapping=dict(zip(education,range(1,len(education)+1)))
df['education_val']=df['education'].map(educationmapping).astype(int)
df.drop('education',axis=1,inplace=True)
is_handicapped=sorted(df['is_handicapped'].unique())
is_handicappedmapping=dict(zip(is_handicapped,range(1,len(is_handicapped)+1)))
df['is_handicapped_val']=df['is_handicapped'].map(is_handicappedmapping).astype(int)
df.drop('is_handicapped',axis=1,inplace=True)
#creating new program_id group based on is_pass percentage
df['new_program_id_group']=pd.DataFrame(df['program_id'])
df.loc[(df.new_program_id_group=='X_1')|(df.new_program_id_group=='X_3'),'new_program_id_group']=1
df.loc[(df.new_program_id_group=='Y_1')|(df.new_program_id_group=='Y_2')|(df.new_program_id_group=='Y_3')|(df.new_program_id_group=='Y_4')|(df.new_program_id_group=='X_2'),'new_program_id_group']=2
df.loc[(df.new_program_id_group=='Z_1')|(df.new_program_id_group=='Z_2')|(df.new_program_id_group=='Z_3')|(df.new_program_id_group=='T_2')|(df.new_program_id_group=='T_3')|(df.new_program_id_group=='T_4'),'new_program_id_group']=3
df.loc[(df.new_program_id_group=='U_1'),'new_program_id_group']=4
df.loc[(df.new_program_id_group=='V_1')|(df.new_program_id_group=='U_2'),'new_program_id_group']=5
df.loc[(df.new_program_id_group=='V_3')|(df.new_program_id_group=='S_2')|(df.new_program_id_group=='V_4')|(df.new_program_id_group=='V_2'),'new_program_id_group']=6
df.loc[(df.new_program_id_group=='T_1')|(df.new_program_id_group=='S_1'),'new_program_id_group']=7
df.drop('program_id',axis=1,inplace=True)
#creating col test_id and rating category together
train=pd.read_csv('train_HK6lq50.csv')
test=pd.read_csv('test_2nAIblo.csv')
total=train.append(test)
count=0
total['test_id_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.test_id==b),'test_id_and_rating']=count
dic=dict(zip(total['id'],total['test_id_and_rating']))
df['test_id_and_rating']=df['id'].map(dic)
count=0
total['test_id_and_education']=0
for a in total.education.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.education==a)&(total.test_id==b),'test_id_and_education']=count
dic=dict(zip(total['id'],total['test_id_and_education']))
df['test_id_and_education']=df['id'].map(dic)
count=0
total['program_type_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.program_type.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.program_type==b),'program_type_and_rating']=count
dic=dict(zip(total['id'],total['program_type_and_rating']))
df['program_type_and_rating']=df['id'].map(dic)
#grouping of test_id_and_rating
c=pd.crosstab(df.test_id_and_rating,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic4=dic['id_group']
df['test_id_and_rating_group']=df['test_id_and_rating'].map(dic4).astype(int)
#grouping of program_type_and_rating
c=pd.crosstab(df.program_type_and_rating,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic41=dic['id_group']
df['program_type_and_rating_group']=df['program_type_and_rating'].map(dic41).astype(int)
#col avg_rating by test_id
total=train.append(test)
c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c['avg_rating']=(c[1.0]+2*c[2.0]+3*c[3.0]+4*c[4.0]+5*c[5.0])/(c[1.0]+c[2.0]+c[3.0]+c[4.0]+c[5.0])
c['test_id']=c.index
dic5=dict(zip(c['test_id'],c['avg_rating']))
df['avg_rating']=df['test_id'].map(dic5)
    #rating_diff: count(1.0+2.0+3.0) minus count(4.0+5.0)
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c=pd.crosstab(df.test_id,df.trainee_engagement_rating)
c['rating_diff_test_id']=c[1.0]+c[2.0]-c[4.0]-c[5.0]+c[3.0]
c['test_id']=c.index
dic6=dict(zip(c['test_id'],c['rating_diff_test_id']))
df['rating_diff_test_id']=df['test_id'].map(dic6)
#col avg_rating by trainee_id
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c=pd.crosstab(df.trainee_id,df.trainee_engagement_rating)
c['avg_rating_trainee_id']=(c[1.0]+2*c[2.0]+3*c[3.0]+4*c[4.0]+5*c[5.0])/(c[1.0]+c[2.0]+c[3.0]+c[4.0]+c[5.0])
c['trainee_id']=c.index
dic7=dict(zip(c['trainee_id'],c['avg_rating_trainee_id']))
df['avg_rating_trainee_id']=df['trainee_id'].map(dic7)
#is_pass_diff wrt trainee_engagement_rating
c=pd.crosstab(df.trainee_engagement_rating,df.is_pass)
c['trainee_engagement_rating']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_rating']=c['pass']-c['fail']
dic8=dict(zip(c['trainee_engagement_rating'],c['is_pass_diff_rating']))
df['is_pass_diff_rating']=df['trainee_engagement_rating'].map(dic8).astype(int)
#is_pass_diff wrt total_programs_enrolled
c=pd.crosstab(df.total_programs_enrolled,df.is_pass)
c['total_programs_enrolled']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_total_programs_enrolled']=c['pass']-c['fail']
dic9=dict(zip(c['total_programs_enrolled'],c['is_pass_diff_total_programs_enrolled']))
df['is_pass_diff_total_programs_enrolled']=df['total_programs_enrolled'].map(dic9).astype(int)
#is_pass_diff wrt difficulty_level_val
c=pd.crosstab(df.difficulty_level_val,df.is_pass)
c['difficulty_level_val']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_difficulty_level']=c['pass']-c['fail']
dic10=dict(zip(c['difficulty_level_val'],c['is_pass_diff_difficulty_level']))
df['is_pass_diff_difficulty_level']=df['difficulty_level_val'].map(dic10).astype(int)
#is_pass_diff wrt education_val
c=pd.crosstab(df.education_val,df.is_pass)
c['education_val']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_education']=c['pass']-c['fail']
dic11=dict(zip(c['education_val'],c['is_pass_diff_education']))
df['is_pass_diff_education']=df['education_val'].map(dic11).astype(int)
#is_pass_diff wrt city_tier
c=pd.crosstab(df.city_tier,df.is_pass)
c['city_tier']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_city_tier']=c['pass']-c['fail']
dic12=dict(zip(c['city_tier'],c['is_pass_diff_city_tier']))
df['is_pass_diff_city_tier']=df['city_tier'].map(dic12).astype(int)
#is_pass_diff wrt new_program_id_group
c=pd.crosstab(df.new_program_id_group,df.is_pass)
c['new_program_id_group']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_new_program_id_group']=c['pass']-c['fail']
dic13=dict(zip(c['new_program_id_group'],c['is_pass_diff_new_program_id_group']))
df['is_pass_diff_new_program_id_group']=df['new_program_id_group'].map(dic13).astype(int)
#is_pass_diff wrt program_id_val
c=pd.crosstab(df.program_id_val,df.is_pass)
c['program_id_val']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_program_id_val']=c['pass']-c['fail']
dic14=dict(zip(c['program_id_val'],c['is_pass_diff_program_id_val']))
df['is_pass_diff_program_id_val']=df['program_id_val'].map(dic14).astype(int)
#is_pass_diff wrt program_duration
c=pd.crosstab(df.program_duration,df.is_pass)
c['program_duration']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_program_duration']=c['pass']-c['fail']
dic15=dict(zip(c['program_duration'],c['is_pass_diff_program_duration']))
df['is_pass_diff_program_duration']=df['program_duration'].map(dic15).astype(int)
#is_pass_diff wrt total_test_taken
c=pd.crosstab(df.total_test_taken,df.is_pass)
c['total_test_taken']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_total_test_taken']=c['pass']-c['fail']
dic16=dict(zip(c['total_test_taken'],c['is_pass_diff_total_test_taken']))
df['is_pass_diff_total_test_taken']=df['total_test_taken'].map(dic16).astype(int)
#is_pass_diff wrt test_type_val
c=pd.crosstab(df.test_type_val,df.is_pass)
c['test_type_val']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_test_type_val']=c['pass']-c['fail']
dic17=dict(zip(c['test_type_val'],c['is_pass_diff_test_type_val']))
df['is_pass_diff_test_type_val']=df['test_type_val'].map(dic17).astype(int)
#is_pass_diff wrt program_type_val
c=pd.crosstab(df.program_type_val,df.is_pass)
c['program_type_val']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_program_type_val']=c['pass']-c['fail']
dic18=dict(zip(c['program_type_val'],c['is_pass_diff_program_type_val']))
df['is_pass_diff_program_type_val']=df['program_type_val'].map(dic18).astype(int)
#is_pass_diff wrt total_trainee_in_each_test
c=pd.crosstab(df.total_trainee_in_each_test,df.is_pass)
c['total_trainee_in_each_test']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff_total_trainee_in_each_test']=c['pass']-c['fail']
dic19=dict(zip(c['total_trainee_in_each_test'],c['is_pass_diff_total_trainee_in_each_test']))
df['is_pass_diff_total_trainee_in_each_test']=df['total_trainee_in_each_test'].map(dic19).astype(int)
#grouping for test_id
c=pd.crosstab(df.test_id,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic20=dic['id_group']
df['test_id_group']=df['test_id'].map(dic20).astype(int)
#grouping for trainee_id
c=pd.crosstab(df.trainee_id,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=0)&(c_pct.id_group<.20),'id_group']=1
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.60),'id_group']=3
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.80),'id_group']=4
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<=1),'id_group']=5
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic21=dic['id_group']
df['trainee_id_group']=df['trainee_id'].map(dic21)
#is_pass_diff wrt trainee_id
c=pd.crosstab(df.trainee_id,df.is_pass)
c['trainee_id']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff']=c['pass']-c['fail']
dic22=dict(zip(c['trainee_id'],c['is_pass_diff']))
df['is_pass_diff']=df['trainee_id'].map(dic22)
#is_pass_diff2 wrt test_id
c=pd.crosstab(df.test_id,df.is_pass)
c['test_id']=c.index
c['pass']=c[1]
c['fail']=c[0]
c['is_pass_diff2']=c['pass']-c['fail']
dic23=dict(zip(c['test_id'],c['is_pass_diff2']))
df['is_pass_diff2']=df['test_id'].map(dic23)
col=['program_duration', 'city_tier', 'total_programs_enrolled',
'trainee_engagement_rating', 'isage_null', 'test_type_val',
'program_id_val', 'difficulty_level_val',
'education_val', 'is_handicapped_val',
'trainee_engagement_rating_mean_target','new_program_id_group']
mean_enc=[]
for i in col:
means=df.groupby(i).is_pass.mean()
df[i+'_mean_target']=df[i].map(means)
df.drop(i,axis=1,inplace=True)
mean_enc.append(means)
df.drop('is_pass',axis=1,inplace=True)
df.drop(['id','gender'],axis=1,inplace=True)
dic_all=[dic1,dic2,dic3,dic4,dic41,dic5,dic6,dic7,dic8,dic9,dic10,dic11,dic12,dic13,dic14,dic15,dic16,dic17,dic18,dic19,dic20,dic21,dic22,dic23]
return(df,dic_all,mean_enc)
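# The test-set counterpart below rebuilds the same features but reuses the lookup dictionaries and
# target-mean encodings computed on the training data, since test labels are not available.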
def test_data_preprocess(df,train,test,dic_all,mean_enc):
(dic1,dic2,dic3,dic4,dic41,dic5,dic6,dic7,dic8,dic9,dic10,dic11,dic12,dic13,dic14,dic15,dic16,dic17,dic18,dic19,dic20,dic21,dic22,dic23)=dic_all
df['trainee_engagement_rating'].fillna(value=1.0,inplace=True)
df['isage_null']=0
    df.loc[df.age.isnull(), 'isage_null'] = 1
df['age'].fillna(value=0,inplace=True)
#new cols actual_programs_enrolled and total_test_taken
df['actual_programs_enrolled']=df['trainee_id'].map(dic1).astype(int)
df['total_test_taken']=df['trainee_id'].map(dic2).astype(int)
#new col total_trainee_in_each_test
df['total_trainee_in_each_test']=df['test_id'].map(dic3).astype(int)
#LABEL ENCODING
test_type=sorted(df['test_type'].unique())
test_type_mapping=dict(zip(test_type,range(1,len(test_type)+1)))
df['test_type_val']=df['test_type'].map(test_type_mapping).astype(int)
df.drop('test_type',axis=1,inplace=True)
program_type=sorted(df['program_type'].unique())
program_type_mapping=dict(zip(program_type,range(1,len(program_type)+1)))
df['program_type_val']=df['program_type'].map(program_type_mapping).astype(int)
df.drop('program_type',axis=1,inplace=True)
program_id=sorted(df['program_id'].unique())
program_id_mapping=dict(zip(program_id,range(1,len(program_id)+1)))
df['program_id_val']=df['program_id'].map(program_id_mapping).astype(int)
#df.drop('program_id',axis=1,inplace=True)
difficulty_level=['easy','intermediate','hard','vary hard']
difficulty_level_mapping=dict(zip(difficulty_level,range(1,len(difficulty_level)+1)))
df['difficulty_level_val']=df['difficulty_level'].map(difficulty_level_mapping).astype(int)
df.drop('difficulty_level',axis=1,inplace=True)
education=['No Qualification','High School Diploma','Matriculation','Bachelors','Masters']
educationmapping=dict(zip(education,range(1,len(education)+1)))
df['education_val']=df['education'].map(educationmapping).astype(int)
df.drop('education',axis=1,inplace=True)
is_handicapped=sorted(df['is_handicapped'].unique())
is_handicappedmapping=dict(zip(is_handicapped,range(1,len(is_handicapped)+1)))
df['is_handicapped_val']=df['is_handicapped'].map(is_handicappedmapping).astype(int)
df.drop('is_handicapped',axis=1,inplace=True)
#creating new program_id group based on is_pass percentage
df['new_program_id_group']=pd.DataFrame(df['program_id'])
df.loc[(df.new_program_id_group=='X_1')|(df.new_program_id_group=='X_3'),'new_program_id_group']=1
df.loc[(df.new_program_id_group=='Y_1')|(df.new_program_id_group=='Y_2')|(df.new_program_id_group=='Y_3')|(df.new_program_id_group=='Y_4')|(df.new_program_id_group=='X_2'),'new_program_id_group']=2
df.loc[(df.new_program_id_group=='Z_1')|(df.new_program_id_group=='Z_2')|(df.new_program_id_group=='Z_3')|(df.new_program_id_group=='T_2')|(df.new_program_id_group=='T_3')|(df.new_program_id_group=='T_4'),'new_program_id_group']=3
df.loc[(df.new_program_id_group=='U_1'),'new_program_id_group']=4
df.loc[(df.new_program_id_group=='V_1')|(df.new_program_id_group=='U_2'),'new_program_id_group']=5
df.loc[(df.new_program_id_group=='V_3')|(df.new_program_id_group=='S_2')|(df.new_program_id_group=='V_4')|(df.new_program_id_group=='V_2'),'new_program_id_group']=6
df.loc[(df.new_program_id_group=='T_1')|(df.new_program_id_group=='S_1'),'new_program_id_group']=7
df.drop('program_id',axis=1,inplace=True)
#creating col test_id and rating category
total=train.append(test)
count=0
total['test_id_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.test_id==b),'test_id_and_rating']=count
dic=dict(zip(total['id'],total['test_id_and_rating']))
df['test_id_and_rating']=df['id'].map(dic)
count=0
total['test_id_and_education']=0
for a in total.education.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.education==a)&(total.test_id==b),'test_id_and_education']=count
dic=dict(zip(total['id'],total['test_id_and_education']))
df['test_id_and_education']=df['id'].map(dic)
count=0
total['program_type_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.program_type.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.program_type==b),'program_type_and_rating']=count
dic=dict(zip(total['id'],total['program_type_and_rating']))
df['program_type_and_rating']=df['id'].map(dic)
#grouping of test_id_and_rating
df['test_id_and_rating_group']=df['test_id_and_rating'].map(dic4)
#grouping of program_type_and_rating
df['program_type_and_rating_group']=df['program_type_and_rating'].map(dic41).astype(int)
#col avg_rating by test_id
df['avg_rating']=df['test_id'].map(dic5)
#rating_diff(count(1.0+2.0)-count(4.0+5.0))
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
df['rating_diff_test_id']=df['test_id'].map(dic6)
#col avg_rating by trainee_id
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
df['avg_rating_trainee_id']=df['trainee_id'].map(dic7)
#is_pass_diff wrt trainee_engagement_rating
df['is_pass_diff_rating']=df['trainee_engagement_rating'].map(dic8).astype(int)
#is_pass_diff wrt total_programs_enrolled
df['is_pass_diff_total_programs_enrolled']=df['total_programs_enrolled'].map(dic9).astype(int)
#is_pass_diff wrt difficulty_level_val
df['is_pass_diff_difficulty_level']=df['difficulty_level_val'].map(dic10).astype(int)
#is_pass_diff wrt education_val
df['is_pass_diff_education']=df['education_val'].map(dic11).astype(int)
#is_pass_diff wrt city_tier
df['is_pass_diff_city_tier']=df['city_tier'].map(dic12).astype(int)
#is_pass_diff wrt new_program_id_group
df['is_pass_diff_new_program_id_group']=df['new_program_id_group'].map(dic13).astype(int)
#is_pass_diff wrt program_id_val
df['is_pass_diff_program_id_val']=df['program_id_val'].map(dic14).astype(int)
#is_pass_diff wrt program_duration
df['is_pass_diff_program_duration']=df['program_duration'].map(dic15).astype(int)
#is_pass_diff wrt total_test_taken
df['is_pass_diff_total_test_taken']=df['total_test_taken'].map(dic16).astype(int)
#is_pass_diff wrt test_type_val
df['is_pass_diff_test_type_val']=df['test_type_val'].map(dic17).astype(int)
#is_pass_diff wrt program_type_val
df['is_pass_diff_program_type_val']=df['program_type_val'].map(dic18).astype(int)
#is_pass_diff wrt total_trainee_in_each_test
df['is_pass_diff_total_trainee_in_each_test']=df['total_trainee_in_each_test'].map(dic19).astype(int)
#grouping for test_id
df['test_id_group']=df['test_id'].map(dic20).astype(int)
#grouping for trainee_id
df['trainee_id_group']=df['trainee_id'].map(dic21)
#is_pass_diff wrt trainee_id
df['is_pass_diff']=df['trainee_id'].map(dic22)
#is_pass_diff2 wrt test_id
df['is_pass_diff2']=df['test_id'].map(dic23)
#TARGET ENCODING
col=['program_duration', 'city_tier', 'total_programs_enrolled',
'trainee_engagement_rating', 'isage_null', 'test_type_val',
'program_id_val', 'difficulty_level_val',
'education_val', 'is_handicapped_val',
'trainee_engagement_rating_mean_target','new_program_id_group']
j=0
for i in col:
df[i+'_mean_target']=df[i].map(mean_enc[j])
df.drop(i,axis=1,inplace=True)
j+=1
df.drop(['id','gender'],axis=1,inplace=True)
return(df)
df=pd.read_csv('train_HK6lq50.csv')
train = pd.read_csv('train_HK6lq50.csv')
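# Assumed continuation (sketch): load the test split and chain both preprocessing helpers so the
# train-derived dictionaries and encodings are applied consistently. Names below are illustrative.
test = pd.read_csv('test_2nAIblo.csv')
df, dic_all, mean_enc = train_data_preprocess(df, train, test)
df_test = test_data_preprocess(test.copy(), train, test, dic_all, mean_enc)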
import torch
import time
import numpy as np
from .utils import accuracy_onehot, save_model
from sklearn.metrics import confusion_matrix
import pandas as pd
import copy
def train(model, optimizer, criterion, train_dl, test_dl,
N_epochs : int, batch_size : int, history=None, history_model_state=[],
fold=None, name=None, savedir=None, cfg=None, accuracy=None):
"""Trains the model.
Parameters
----------
model :
The model to be trained
optimizer :
The pytorch optimizer used to perform training
criterion :
The pytorch loss function
train_dl :
The training dataset data loader
test_dl :
The testing dataset data loader
N_epochs : int
Number of epochs to perform training
batch_size : int
The batch size to use during training
history :
...
history_model_state :
...
fold :
...
name :
...
savedir :
...
cfg :
...
"""
if history is None:
history = pd.DataFrame(columns=['time', 'epoch', 'fold', 'loss_train', 'loss_test', 'acc_train', 'acc_test', 'cm_train', 'cm_test'])
t_start = time.time()
for epoch in range(0, N_epochs + 1):
t_epoch = time.time()
loss_iter = []
for num, (xb, yb) in enumerate(train_dl):
def closure():
optimizer.zero_grad()
loss = criterion(model(xb), yb.argmax(dim=1))
loss.backward()
return loss
if epoch == 0: # Don't take a step and just characterize the starting structure
with torch.no_grad():
loss = criterion(model(xb), yb.argmax(dim=1))
else: # Take an optimization step
loss = optimizer.step(closure)
model.clip_to_design_region()
loss_iter.append(loss.item())
with torch.no_grad():
acc_train_tmp = []
list_yb_pred = []
list_yb = []
for num, (xb, yb) in enumerate(train_dl):
yb_pred = model(xb)
list_yb_pred.append(yb_pred)
list_yb.append(yb)
if accuracy is not None:
acc_train_tmp.append( accuracy(yb_pred, yb.argmax(dim=1)) )
y_pred = torch.cat(list_yb_pred, dim=0)
y_truth = torch.cat(list_yb, dim=0)
cm_train = confusion_matrix(y_truth.argmax(dim=1).numpy(), y_pred.argmax(dim=1).numpy())
acc_test_tmp = []
loss_test_tmp = []
list_yb_pred = []
list_yb = []
cm_test = None
if test_dl is not None:
for num, (xb, yb) in enumerate(test_dl):
yb_pred = model(xb)
list_yb_pred.append(yb_pred)
list_yb.append(yb)
loss_test_tmp.append( criterion(yb_pred, yb.argmax(dim=1)) )
if accuracy is not None:
acc_test_tmp.append( accuracy_onehot(yb_pred, yb.argmax(dim=1)) )
y_pred = torch.cat(list_yb_pred, dim=0)
y_truth = torch.cat(list_yb, dim=0)
cm_test = confusion_matrix(y_truth.argmax(dim=1).numpy(), y_pred.argmax(dim=1).numpy())
print('Epoch %2d/%2d --- Elapsed Time: %4.2f min | Training Loss: %.4e | Testing Loss: %.4e | Training Accuracy: %.4f | Testing Accuracy: %.4f' %
(epoch, N_epochs, (time.time()-t_epoch)/60, np.mean(loss_iter), np.mean(loss_test_tmp), np.mean(acc_train_tmp), np.mean(acc_test_tmp)))
            # Remaining fields follow the history columns defined above (assumed completion).
            history = history.append({'time': pd.to_datetime('now'), 'epoch': epoch, 'fold': fold,
                                      'loss_train': np.mean(loss_iter), 'loss_test': np.mean(loss_test_tmp),
                                      'acc_train': np.mean(acc_train_tmp), 'acc_test': np.mean(acc_test_tmp),
                                      'cm_train': cm_train, 'cm_test': cm_test}, ignore_index=True)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
        f = lambda x: x == pd.Period("2011-03", freq="M")
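        # Assumed continuation: the comparison is funneled through _check so PeriodIndex and
        # Series produce the same boolean result.
        exp = np.array([False, False, True, False])
        self._check(idx, f, exp)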
import math
import os
import pathlib
from functools import reduce
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from experiment_definitions import ExperimentDefinitions
from data_collectors import MemtierCollector, MiddlewareCollector
class PlottingFunctions:
@staticmethod
def lineplot(dataframe, experiment_title, save_as_filename,
x=None, y=None, hue=None, style=None, ci='sd', err_style='band',
xlabel=None, ylabel=None, huelabel=None, stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
# markers = hue if style is None else True
# print(markers)
sns.lineplot(x, y, data=dataframe, legend="full", hue=hue, style=style, markers=True,
ci=ci, err_style='band').set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
sns.scatterplot(x, y, data=dataframe, legend=False, hue=hue, style=style,
ci=None).set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
if isinstance(xticks, tuple):
plt.xticks(xticks[0], xticks[1], rotation=45)
else:
if xticks[0] == 6 or xticks[0] == 2:
                xticks = np.insert(xticks, 0, 0)  # np.insert returns a new array, so keep the result
plt.xticks(xticks, rotation=45)
if huelabel is not None or stylelabel is not None:
legend = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
for txt in legend.get_texts():
                if txt.get_text() == hue and huelabel is not None:
txt.set_text(huelabel)
continue
                if txt.get_text() == style and stylelabel is not None:
txt.set_text(stylelabel)
continue
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def barplot(dataframe, experiment_title, save_as_filename,
x=None, y=None, hue=None, ci='sd',
xlabel=None, ylabel=None, huelabel=None,
xlim=(None, None), ylim=(0, None),
xticks=None):
sns.barplot(x, y, hue, data=dataframe,
ci=ci, capsize=.1, errwidth=1.5).set(xlabel=xlabel, ylabel=ylabel, title=experiment_title,
xlim=xlim, ylim=ylim)
if isinstance(xticks, tuple):
plt.xticks(xticks[0], xticks[1], rotation=45)
else:
plt.xticks(xticks, rotation=45)
if huelabel is not None:
legend = plt.legend()
for txt in legend.get_texts():
                if txt.get_text() == hue and huelabel is not None:
txt.set_text(huelabel)
continue
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def distplot(histogram, experiment_title, save_as_filename,
bins=200, kde=False,
xlabel=None, ylabel=None, xlim=(0, None), ylim=(0, None),
xticks=None):
sns.distplot(histogram, bins=bins, kde=kde, hist=True).set(xlabel=xlabel, ylabel=ylabel,
title=experiment_title,
xlim=xlim, ylim=ylim)
if xticks is not None:
plt.xticks(*xticks)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def resplot(dataframe, experiment_title, save_as_filename,
x=None, y=None,
xlabel=None, ylabel=None):
sns.residplot(x, y, dataframe).set(xlabel=xlabel, ylabel=ylabel, title=experiment_title)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def qqplot(dataframe, experiment_title, save_as_filename,
x=None, fit_line=False):
stats.probplot(dataframe[x], dist="norm", fit=fit_line, plot=plt)
plt.title(experiment_title)
if save_as_filename is None:
plt.show()
else:
ExperimentPlotter.save_figure(save_as_filename)
@staticmethod
def plot_throughput_by_type(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Request_Throughput', hue='RequestType', style='Worker_Threads',
ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Throughput (req/s)', huelabel='Request Type',
stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_throughput_family(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Request_Throughput', hue='Worker_Threads', style=None,
ci='sd', err_style='bars',
xlabel='Memtier Client Count', ylabel='Throughput (req/s)', huelabel='Worker Threads',
stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_response_time_by_type(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Response_Time', hue='RequestType', style='Worker_Threads',
ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Response Time (ms)', huelabel='Request Type',
stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_response_time_family(dataframe, experiment_title, save_as_filename,
x='Num_Clients', y='Response_Time', hue='Worker_Threads', style=None, ci='sd',
err_style='bars',
xlabel='Memtier Client Count', ylabel='Response Time (ms)', huelabel='Worker Threads',
stylelabel=None,
xlim=(0, None), ylim=(0, None),
xticks=None):
if xticks is None:
xticks = dataframe[x].unique()
PlottingFunctions.lineplot(dataframe, experiment_title, save_as_filename, x, y, hue, style, ci, err_style,
xlabel, ylabel, huelabel, stylelabel, xlim, ylim, xticks)
@staticmethod
def plot_histogram(histogram, experiment_title, save_as_filename, bins=200, kde=False,
xlabel='Buckets (ms)', ylabel='Request Count', xlim=(0, 20), ylim=(0, 35000),
xticks=None):
if xticks is None:
xticks = (np.arange(0, (bins / 10) + 0.1, step=2.5), np.linspace(0, bins / 10, 9))
PlottingFunctions.distplot(histogram, experiment_title, save_as_filename, bins, kde,
xlabel, ylabel, xlim, ylim, xticks)
class StatisticsFunctions:
@staticmethod
def get_average_and_std(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['mean', 'std']).reset_index().rename(index=str,
columns={
"mean": aggregate_on + '_Mean',
"std": aggregate_on + '_Std'})
@staticmethod
def get_sum(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['sum']).reset_index().rename(index=str, columns={"sum": aggregate_on})
@staticmethod
def get_weighted_average(dataframe, aggregate_on):
return dataframe.apply(lambda x: np.average(x[aggregate_on], weights=x['Request_Throughput'])).reset_index() \
.rename(index=str, columns={0: aggregate_on})
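    # Note: get_weighted_average (above) weights each row by its Request_Throughput, so
    # the result approximates a per-request mean rather than a plain per-repetition mean.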
@staticmethod
def get_arithmetic_mean(dataframe, aggregate_on):
return dataframe[aggregate_on].agg(['mean']).reset_index().rename(index=str, columns={"mean": aggregate_on})
@staticmethod
def get_percentiles(dataframe):
return dataframe.quantile(([.01, .05, .1, .15, .2, .25, .3, .35, .4, .45, .5, .525, .55, .575, .6, .625, .65,
.675, .7, .725, .75, .775, .8, .825, .85, .875, .90, .925, .95, .975, .99, 1])).reset_index().rename(
index=str,
columns={"level_2": 'Percentile'})
@staticmethod
def get_report_percentiles(dataframe):
return dataframe.quantile(([.25, .5, .75, .90, .99])).reset_index().rename(index=str,
columns={"level_2": 'Percentile'})
@staticmethod
def mm1(summary_table, plot=False):
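        # M/M/1 estimates computed below (lambda = arrival rate, mu = max service rate):
        #   rho   = lambda / mu             (traffic intensity)
        #   E[N]  = rho / (1 - rho)         (mean number of jobs in the system)
        #   E[Nq] = rho * E[N]              (mean number of jobs in the queue)
        #   E[T]  = (1/mu) / (1 - rho)      (mean response time, converted to ms below)
        #   E[W]  = rho * E[T]              (mean waiting time, converted to ms below)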
calculations = []
for row in summary_table.itertuples():
lamb = row[4]
muh = row[-1]
measured_response_time = row[5]
measured_queue_waiting_time = row[6]
measured_queue_size = row[8]
traffic_intensity = lamb / muh
mean_nr_jobs_in_system = traffic_intensity / (1 - traffic_intensity)
mean_nr_jobs_in_queue = traffic_intensity * mean_nr_jobs_in_system
mean_response_time = (1 / muh) / (1 - traffic_intensity)
mean_waiting_time = traffic_intensity * mean_response_time
calculations.append({'Num_Clients': row[1],
'Worker_Threads': row[2],
'Maximum_Service_Rate': muh,
'Arrival_Rate': lamb,
'Traffic_Intensity': traffic_intensity,
'Mean_Number_Jobs_System': mean_nr_jobs_in_system,
'Measured_Response_Time': measured_response_time,
'Estimated_Response_Time': mean_response_time * 1000,
'Measured_Queue_Waiting_Time': measured_queue_waiting_time,
'Estimated_Queue_Waiting_Time': mean_waiting_time * 1000,
'Measured_Queue_Size': measured_queue_size,
'Estimated_Queue_Size': mean_nr_jobs_in_queue})
mm1_analysis = pd.DataFrame(calculations)
mm1_analysis = mm1_analysis[['Num_Clients', 'Worker_Threads', 'Maximum_Service_Rate', 'Arrival_Rate',
'Traffic_Intensity', 'Mean_Number_Jobs_System', 'Measured_Response_Time',
'Estimated_Response_Time', 'Measured_Queue_Waiting_Time',
'Estimated_Queue_Waiting_Time', 'Measured_Queue_Size', 'Estimated_Queue_Size']]
return mm1_analysis
@staticmethod
def mmm(summary_table, plot=False):
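        # M/M/m estimates (m = 2 * worker threads servers, each with service rate mu):
        #   rho = lambda / (m * mu)
        #   P0  = 1 / (1 + (m*rho)^m / (m! (1 - rho)) + sum_{n=1}^{m-1} (m*rho)^n / n!)
        #   Pq  = P0 * (m*rho)^m / (m! (1 - rho))        (Erlang-C probability of queueing)
        #   E[Nq] = rho * Pq / (1 - rho)
        #   E[T]  = (1/mu) * (1 + Pq / (m (1 - rho))),  E[W] = E[Nq] / lambda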
calculations = []
for row in summary_table.itertuples():
lamb = row[4]
servers = row[2] * 2
muh = row[-1] / servers
measured_response_time = row[5]
measured_queue_waiting_time = row[6]
measured_queue_size = row[8]
traffic_intensity = lamb / (muh * servers)
_param1 = math.pow(servers * traffic_intensity, servers) / (
math.factorial(servers) * (1 - traffic_intensity))
probability_zero_jobs_in_system = 1 / (1 + _param1 +
sum([pow(servers * traffic_intensity, n) / math.factorial(n) for n in
range(1, servers)]))
probability_of_queueing = probability_zero_jobs_in_system * _param1
mean_number_jobs_in_queue = (traffic_intensity * probability_of_queueing) / (1 - traffic_intensity)
mean_number_jobs_in_system = servers * traffic_intensity + mean_number_jobs_in_queue
average_utilization_each_server = traffic_intensity
mean_response_time = (1 / muh) * (1 + probability_of_queueing / (servers * (1 - traffic_intensity)))
mean_waiting_time = mean_number_jobs_in_queue / lamb
calculations.append({'Num_Clients': row[1],
'Worker_Threads': row[2],
'Maximum_Service_Rate': muh,
'Arrival_Rate': lamb,
'Traffic_Intensity': traffic_intensity,
'Mean_Number_Jobs_System': mean_number_jobs_in_system,
'Measured_Response_Time': measured_response_time,
'Estimated_Response_Time': mean_response_time * 1000,
'Measured_Queue_Waiting_Time': measured_queue_waiting_time,
'Estimated_Queue_Waiting_Time': mean_waiting_time * 1000,
'Measured_Queue_Size': measured_queue_size,
'Estimated_Queue_Size': mean_number_jobs_in_queue,
'Probability_Zero_Jobs_System': probability_zero_jobs_in_system,
'Probability_Queueing': probability_of_queueing,
'Mean_Average_Utilization_Each_Server': average_utilization_each_server})
mmm_analysis = pd.DataFrame(calculations)
mmm_analysis = mmm_analysis[['Num_Clients', 'Worker_Threads', 'Maximum_Service_Rate', 'Arrival_Rate',
'Traffic_Intensity', 'Mean_Number_Jobs_System', 'Measured_Response_Time',
'Estimated_Response_Time', 'Measured_Queue_Waiting_Time',
'Estimated_Queue_Waiting_Time', 'Measured_Queue_Size', 'Estimated_Queue_Size',
'Probability_Zero_Jobs_System', 'Probability_Queueing',
'Mean_Average_Utilization_Each_Server']]
return mmm_analysis
class ExperimentPlotter:
@staticmethod
def save_figure(save_as_filename):
current_dir = pathlib.Path(__file__).parent
figure_path = current_dir.joinpath("figures")
if not os.path.exists(figure_path):
os.makedirs(figure_path)
figure_path = figure_path.joinpath(save_as_filename + ".png")
plt.savefig(figure_path, dpi=150, bbox_inches='tight')
plt.close()
@staticmethod
def memtier_experiment(experiment_definition, histogram=False):
memtier_collector = MemtierCollector(experiment_definition)
memtier_collector.generate_dataframe(histogram)
return [[memtier_collector.dataframe_set, memtier_collector.dataframe_get],
[memtier_collector.dataframe_histogram_set, memtier_collector.dataframe_histogram_get]]
@staticmethod
def middleware_experiment(experiment_definition, histogram=False):
middleware_collector = MiddlewareCollector(experiment_definition)
middleware_collector.generate_dataframe(histogram)
return [[middleware_collector.dataframe_set, middleware_collector.dataframe_get],
[middleware_collector.dataframe_histogram_set, middleware_collector.dataframe_histogram_get]]
@staticmethod
def memtier_statistics_get_set(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
set_group = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_set = StatisticsFunctions.get_sum(set_group, 'Request_Throughput')
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Response_Time')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
if plot:
concatenated_throughput = pd.concat([throughput_set.assign(RequestType='SET'),
throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_set.assign(RequestType='SET'),
response_time_get.assign(RequestType='GET')])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[
concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mt_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mt_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_response-time-il', ylim=response_time_y)
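            # The 'Interactive' rows presumably hold interactive-law predictions
            # (R = N/X - Z for N clients, throughput X and think time Z); they are
            # plotted separately as the *-il figures above.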
response_time_set = pd.merge(throughput_set, response_time_set)
response_time_get = pd.merge(throughput_get, response_time_get)
hits_get = pd.merge(throughput_get, hits_get)
misses_get = pd.merge(throughput_get, misses_get)
plotted_throughput_set = throughput_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_throughput_get = throughput_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_set = response_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_get = response_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Hits')
misses_get = StatisticsFunctions.get_weighted_average(
misses_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Misses')
throughput_set_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_set, 'Request_Throughput')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get, 'Request_Throughput')
response_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_set, 'Response_Time')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get, 'Response_Time')
set_table_list = [throughput_set_plotted, response_time_set_plotted]
get_table_list = [throughput_get_plotted, response_time_get_plotted, misses_get, hits_get]
set_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']), set_table_list)
get_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']), get_table_list)
print(exp_name + " SET:")
print(set_summary)
print("====================\n")
print(exp_name + " GET:")
print(get_summary)
print("====================\n")
return [set_summary, get_summary]
@staticmethod
def memtier_statistics_request_family(flattened, subexperiment, r_type='SET', plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
family = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_family = StatisticsFunctions.get_sum(family, 'Request_Throughput')
response_time_family = StatisticsFunctions.get_weighted_average(family, 'Response_Time')
if plot:
concatenated_throughput = pd.concat([throughput_family.assign(RequestType=r_type)])
concatenated_response_time = pd.concat([response_time_family.assign(RequestType=r_type)])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mt_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mt_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mt_response-time-il', ylim=response_time_y)
response_time_family = pd.merge(throughput_family, response_time_family)
plotted_throughput_family = throughput_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_family = response_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
throughput_family_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_family,
'Request_Throughput')
response_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_family,
'Response_Time')
family_table_list = [throughput_family_plotted, response_time_family_plotted]
family_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type']),
family_table_list)
print(exp_name + " " + r_type + ":")
print(family_summary)
print("====================\n")
return family_summary
@staticmethod
def memtier_statistics_multiget(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Memtier')
if subexperiment['subexperiment_id'] == 2:
req_types = 'Non-sharded MultiGET'
type_to_number_dict = {
"MULTIGET_1": 1,
"MULTIGET_3": 3,
"MULTIGET_6": 6,
"MULTIGET_9": 9
}
else:
req_types = 'Sharded MultiGET'
type_to_number_dict = {
"SHARDED_1": 1,
"SHARDED_3": 3,
"SHARDED_6": 6,
"SHARDED_9": 9
}
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group['Type'] = get_group['Type'].replace(type_to_number_dict, regex=True)
        get_group['Type'] = pd.to_numeric(get_group['Type'])
get_group = get_group.groupby(['Type', 'Repetition', 'Worker_Threads'])
summed_get_throughput = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
average_get_response_time = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
concatenated_throughput = pd.concat([summed_get_throughput.assign(RequestType='GET')])
concatenated_response_time = pd.concat([average_get_response_time.assign(RequestType='GET')])
if plot:
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.lineplot(concatenated_throughput, exp_name, plot_base + 'mt_throughput', x='Type',
y='Request_Throughput',
xlabel=req_types, ylabel='Throughput (req/s)',
xlim=(0, None), ylim=throughput_y, xticks=[1, 3, 6, 9])
PlottingFunctions.lineplot(concatenated_response_time, exp_name, plot_base + 'mt_response-time', x='Type',
y='Response_Time',
xlabel=req_types, ylabel='Response Time (ms)',
xlim=(0, None), ylim=response_time_y, xticks=[1, 3, 6, 9])
average_get_response_time = pd.merge(summed_get_throughput, average_get_response_time)
hits_get = pd.merge(summed_get_throughput, hits_get)
misses_get = pd.merge(summed_get_throughput, misses_get)
plotted_throughput_get = summed_get_throughput.groupby(['Type'])
plotted_response_time_get = average_get_response_time.groupby(['Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Type']), 'Hits')
misses_get = StatisticsFunctions.get_weighted_average(misses_get.groupby(['Type']), 'Misses')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get, 'Request_Throughput')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get, 'Response_Time')
get_table_list = [throughput_get_plotted, response_time_get_plotted, misses_get, hits_get]
get_summary = reduce(lambda left, right: pd.merge(left, right, on=['Type']), get_table_list)
print(exp_name + " GET:")
print(get_summary)
print("====================\n\n")
return get_summary
@staticmethod
def middleware_statistics_get_set(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
set_group = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_set = StatisticsFunctions.get_sum(set_group, 'Request_Throughput')
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Response_Time')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
set_group = flattened[0][~flattened[0].Type.str.contains('Interactive')]
set_group = set_group.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group = get_group.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
queue_waiting_time_set = StatisticsFunctions.get_weighted_average(set_group, 'Queue_Waiting_Time')
queue_waiting_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Waiting_Time')
memcached_communication_set = StatisticsFunctions.get_weighted_average(set_group, 'Memcached_Communication')
memcached_communication_get = StatisticsFunctions.get_weighted_average(get_group, 'Memcached_Communication')
queue_size_set = StatisticsFunctions.get_weighted_average(set_group, 'Queue_Size')
queue_size_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Size')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
if plot:
xticks = flattened[0]['Num_Clients'].unique()
concatenated_throughput = pd.concat([throughput_set.assign(RequestType='SET'),
throughput_get.assign(RequestType='GET')])
concatenated_response_time = pd.concat([response_time_set.assign(RequestType='SET'),
response_time_get.assign(RequestType='GET')])
concatenated_queue_waiting_time = pd.concat([queue_waiting_time_set.assign(RequestType='SET'),
queue_waiting_time_get.assign(RequestType='GET')])
concatenated_memcached_communication = pd.concat([memcached_communication_set.assign(RequestType='SET'),
memcached_communication_get.assign(RequestType='GET')])
concatenated_queue_size = pd.concat([queue_size_set.assign(RequestType='SET'),
queue_size_get.assign(RequestType='GET')])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[
concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mw_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mw_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_response-time-il', ylim=response_time_y)
PlottingFunctions.lineplot(concatenated_queue_waiting_time, exp_name, plot_base + "mw_queue-wait-time",
x='Num_Clients', y='Queue_Waiting_Time', hue='RequestType',
style='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Queue Waiting Time (ms)', huelabel='Request Type',
stylelabel='Worker Threads', xlim=(0, None), ylim=queue_waiting_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_memcached_communication, exp_name, plot_base + "mw_mc-comm-time",
x='Num_Clients', y='Memcached_Communication', hue='RequestType',
style='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Memcached Handling (ms)',
huelabel='Request Type', stylelabel='Worker Threads',
xlim=(0, None), ylim=memcached_handling_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_queue_size, exp_name, plot_base + "mw_queue-size", x='Num_Clients',
y='Queue_Size', hue='RequestType', style='Worker_Threads',
xlabel='Number Memtier Clients', ylabel='Queue Size',
huelabel='Request Type', stylelabel='Worker Threads',
xlim=(0, None), ylim=(0, None), xticks=xticks)
response_time_set = pd.merge(throughput_set, response_time_set)
response_time_get = pd.merge(throughput_get, response_time_get)
queue_waiting_time_set = pd.merge(throughput_set, queue_waiting_time_set)
queue_waiting_time_get = pd.merge(throughput_get, queue_waiting_time_get)
memcached_communication_set = pd.merge(throughput_set, memcached_communication_set)
memcached_communication_get = pd.merge(throughput_get, memcached_communication_get)
queue_size_set = pd.merge(throughput_set, queue_size_set)
queue_size_get = pd.merge(throughput_get, queue_size_get)
hits_get = pd.merge(throughput_get, hits_get)
misses_get = pd.merge(throughput_get, misses_get)
plotted_throughput_set = throughput_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_throughput_get = throughput_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_set = response_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_get = response_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_waiting_time_set = queue_waiting_time_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_waiting_time_get = queue_waiting_time_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_memcached_communication_set = memcached_communication_set.groupby(
['Num_Clients', 'Worker_Threads', 'Type'])
plotted_memcached_communication_get = memcached_communication_get.groupby(
['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_size_set = queue_size_set.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_size_get = queue_size_get.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
hits_get = StatisticsFunctions.get_weighted_average(hits_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']),
'Hits')
misses_get = StatisticsFunctions.get_weighted_average(
misses_get.groupby(['Num_Clients', 'Worker_Threads', 'Type']), 'Misses')
throughput_set_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_set,
'Request_Throughput')
throughput_get_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_get,
'Request_Throughput')
response_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_set,
'Response_Time')
response_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_get,
'Response_Time')
queue_waiting_time_set_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_waiting_time_set,
'Queue_Waiting_Time')
queue_waiting_time_get_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_waiting_time_get,
'Queue_Waiting_Time')
memcached_communication_set_plotted = StatisticsFunctions.get_weighted_average(
plotted_memcached_communication_set, 'Memcached_Communication')
memcached_communication_get_plotted = StatisticsFunctions.get_weighted_average(
plotted_memcached_communication_get, 'Memcached_Communication')
queue_size_set_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_size_set, 'Queue_Size')
queue_size_get_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_size_get, 'Queue_Size')
set_table_list = [throughput_set_plotted, response_time_set_plotted, queue_waiting_time_set_plotted,
memcached_communication_set_plotted, queue_size_set_plotted]
get_table_list = [throughput_get_plotted, response_time_get_plotted, queue_waiting_time_get_plotted,
memcached_communication_get_plotted, queue_size_get_plotted, misses_get, hits_get]
set_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type'], how='outer'),
set_table_list)
get_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type'], how='outer'),
get_table_list)
print(exp_name + " SET:")
print(set_summary)
print("====================\n")
print(exp_name + " GET:")
print(get_summary)
print("====================\n")
return [set_summary, get_summary]
@staticmethod
def middleware_statistics_request_family(flattened, subexperiment, r_type='SET', plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
family = flattened[0].groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
throughput_family = StatisticsFunctions.get_sum(family, 'Request_Throughput')
response_time_family = StatisticsFunctions.get_weighted_average(family, 'Response_Time')
family = flattened[0][~flattened[0].Type.str.contains('Interactive')]
family = family.groupby(['Num_Clients', 'Repetition', 'Worker_Threads', 'Type'])
queue_waiting_time_family = StatisticsFunctions.get_weighted_average(family, 'Queue_Waiting_Time')
memcached_communication_family = StatisticsFunctions.get_weighted_average(family, 'Memcached_Communication')
queue_size_family = StatisticsFunctions.get_weighted_average(family, 'Queue_Size')
if plot:
xticks = flattened[0]['Num_Clients'].unique()
concatenated_throughput = pd.concat([throughput_family.assign(RequestType=r_type)])
concatenated_response_time = pd.concat([response_time_family.assign(RequestType=r_type)])
concatenated_queue_waiting_time = pd.concat([queue_waiting_time_family.assign(RequestType=r_type)])
concatenated_memcached_communication = pd.concat(
[memcached_communication_family.assign(RequestType=r_type)])
concatenated_queue_size = pd.concat([queue_size_family.assign(RequestType=r_type)])
throughput_measured = concatenated_throughput[~concatenated_throughput.Type.str.contains('Interactive')]
throughput_interactive = concatenated_throughput[concatenated_throughput.Type.str.contains('Interactive')]
response_time_measured = concatenated_response_time[
~concatenated_response_time.Type.str.contains('Interactive')]
response_time_interactive = concatenated_response_time[
concatenated_response_time.Type.str.contains('Interactive')]
plot_base = "{}-{}_".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'])
PlottingFunctions.plot_throughput_by_type(throughput_measured, exp_name, plot_base + 'mw_throughput',
ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_measured, exp_name,
plot_base + 'mw_response_time', ylim=response_time_y)
PlottingFunctions.plot_throughput_by_type(throughput_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_throughput-il', ylim=throughput_y)
PlottingFunctions.plot_response_time_by_type(response_time_interactive, exp_name + ' Interactive Law',
plot_base + 'mw_response-time-il', ylim=response_time_y)
PlottingFunctions.lineplot(concatenated_queue_waiting_time, exp_name, plot_base + "mw_queue-wait-time",
x='Num_Clients', y='Queue_Waiting_Time', hue='Worker_Threads',
xlabel='Number Memtier Clients', ylabel='Queue Waiting Time (ms)',
huelabel='Worker Threads', xlim=(0, None), ylim=queue_waiting_y, xticks=xticks)
PlottingFunctions.lineplot(concatenated_memcached_communication, exp_name, plot_base + "mw_mc-comm-time",
x='Num_Clients', y='Memcached_Communication', hue='Worker_Threads',
xlabel='Number Memtier Clients',
ylabel='Memcached Handling (ms)',
huelabel='Worker Threads', xlim=(0, None), ylim=memcached_handling_y,
xticks=xticks)
PlottingFunctions.lineplot(concatenated_queue_size, exp_name, plot_base + "mw_queue-size", x='Num_Clients',
y='Queue_Size', hue='Worker_Threads', xlabel='Number Memtier Clients',
ylabel='Queue Size', huelabel='Worker Threads', xlim=(0, None), ylim=(0, None),
xticks=xticks)
response_time_family = pd.merge(throughput_family, response_time_family)
queue_waiting_time_family = pd.merge(throughput_family, queue_waiting_time_family)
memcached_communication_family = pd.merge(throughput_family, memcached_communication_family)
queue_size_family = pd.merge(throughput_family, queue_size_family)
plotted_throughput_family = throughput_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_response_time_family = response_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_waiting_time_family = queue_waiting_time_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
plotted_memcached_communication_family = memcached_communication_family.groupby(
['Num_Clients', 'Worker_Threads', 'Type'])
plotted_queue_size_family = queue_size_family.groupby(['Num_Clients', 'Worker_Threads', 'Type'])
throughput_family_plotted = StatisticsFunctions.get_weighted_average(plotted_throughput_family,
'Request_Throughput')
response_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_response_time_family,
'Response_Time')
queue_waiting_time_family_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_waiting_time_family,
'Queue_Waiting_Time')
memcached_communication_family_plotted = StatisticsFunctions.get_weighted_average(
plotted_memcached_communication_family, 'Memcached_Communication')
queue_size_family_plotted = StatisticsFunctions.get_weighted_average(plotted_queue_size_family, 'Queue_Size')
family_table_list = [throughput_family_plotted, response_time_family_plotted, queue_waiting_time_family_plotted,
memcached_communication_family_plotted, queue_size_family_plotted]
family_summary = reduce(lambda left, right: pd.merge(left, right,
on=['Num_Clients', 'Worker_Threads', 'Type'], how='outer'),
family_table_list)
print(exp_name + " " + r_type + ":")
print(family_summary)
print("====================\n")
return family_summary
@staticmethod
def middleware_statistics_multiget(flattened, subexperiment, plot=True, throughput_y=(0, None),
response_time_y=(0, None), queue_waiting_y=(0, None),
memcached_handling_y=(0, None)):
exp_name = "Experiment {}.{} - {}".format(subexperiment['experiment_id'], subexperiment['subexperiment_id'],
'Middleware')
if subexperiment['subexperiment_id'] == 2:
req_types = 'Non-sharded MultiGET'
type_to_number_dict = {
"MULTIGET_1": 1,
"MULTIGET_3": 3,
"MULTIGET_6": 6,
"MULTIGET_9": 9
}
else:
req_types = 'Sharded MultiGET'
type_to_number_dict = {
"SHARDED_1": 1,
"SHARDED_3": 3,
"SHARDED_6": 6,
"SHARDED_9": 9
}
get_group = flattened[1][~flattened[1].Type.str.contains('Interactive')]
get_group['Type'] = get_group['Type'].replace(type_to_number_dict, regex=True)
        get_group['Type'] = pd.to_numeric(get_group['Type'])
server_load = get_group['Key_Distribution'].apply(pd.Series).apply(pd.Series)
server_load.rename(columns={0: 'Server1', 1: 'Server2', 2: 'Server3'}, inplace=True)
server_load.fillna(value=0, inplace=True)
get_group = get_group.join(server_load)
get_group.drop(columns=['Key_Distribution'], inplace=True)
names = get_group.columns.tolist()
names.remove('Server1')
names.remove('Server2')
names.remove('Server3')
get_copy = get_group
get_group = get_group.groupby(['Type', 'Repetition'])
throughput_get = StatisticsFunctions.get_sum(get_group, 'Request_Throughput')
response_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Response_Time')
queue_waiting_time_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Waiting_Time')
memcached_communication_get = StatisticsFunctions.get_weighted_average(get_group, 'Memcached_Communication')
queue_size_get = StatisticsFunctions.get_weighted_average(get_group, 'Queue_Size')
hits_get = StatisticsFunctions.get_sum(get_group, 'Hits')
misses_get = StatisticsFunctions.get_sum(get_group, 'Misses')
keysize_get = StatisticsFunctions.get_weighted_average(get_group, 'Request_Size')
key_throughput_get = StatisticsFunctions.get_sum(get_group, 'Key_Throughput')
server_loads = | pd.wide_to_long(get_copy, stubnames='Server', i=names, j='Server_ID') | pandas.wide_to_long |
# Copyright 2020 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import time
import json
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
import argparse
def filter_labels(x):
if len(set(x)) == 1:
return x[0]
else:
return -1
def preprocess_raw_data(gt3x_dir, activpal_dir, user_id, gt3x_frequency, label_map):
if activpal_dir is not None:
# Read activepal file
        def date_parser(x): return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
df_ap = pd.read_csv(os.path.join(activpal_dir, str(user_id)+'.csv'),
parse_dates=['StartTime', 'EndTime'], date_parser=date_parser, usecols=['StartTime', 'EndTime', 'Behavior'])
# Flatten the activepal file to 1 second resolution
data = []
prev_end_time = None
segment_no = 0
for i in range(len(df_ap)):
x = df_ap.iloc[i]
if not (prev_end_time is None) and (x['StartTime']-prev_end_time).total_seconds() > 1:
segment_no += 1
            for sec in range(int((x['EndTime']-x['StartTime']).total_seconds() + 1)):
                data.append([segment_no, x['StartTime'] +
                             timedelta(seconds=sec), label_map[x['Behavior']]])
prev_end_time = x['EndTime']
df_ap = pd.DataFrame(data)
df_ap.columns = ['Segment', 'Time', 'Behavior']
else:
df_ap = None
# Find activegraph start time
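    # (the ActiGraph CSV header is assumed to carry the start time on its 3rd line and
    #  the start date on its 4th line, hence the count == 2 / count == 3 checks below)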
with open(os.path.join(gt3x_dir, str(user_id)+'.csv'), 'r') as fp:
acc_start_time = ''
count = 0
for l in fp:
if count == 2:
acc_start_time = l.split(' ')[2].strip()
elif count == 3:
acc_start_time = l.split(' ')[2].strip() + ' ' + acc_start_time
break
count += 1
# Read activegraph file
df_acc = pd.read_csv(os.path.join(gt3x_dir, str(user_id)+'.csv'), skiprows=10)
# Aggregate at 1 second resolution
data = []
begin_time = datetime.strptime(acc_start_time, '%m/%d/%Y %H:%M:%S')
for i in range(0, len(df_acc), gt3x_frequency):
x = np.array(df_acc.iloc[i:i+gt3x_frequency])
data.append([begin_time + timedelta(seconds=i//gt3x_frequency), x])
df_acc = pd.DataFrame(data)
df_acc.columns = ['Time', 'Accelerometer']
# Create joined table
if df_ap is not None:
df = pd.merge(df_acc, df_ap, on='Time')
df['User'] = user_id
df = df[['User', 'Segment', 'Time', 'Accelerometer', 'Behavior']]
else:
df = df_acc
df['User'] = user_id
df = df[['User', 'Time', 'Accelerometer']]
return df
def extract_windows(original_df, window_size):
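    # For each (user, contiguous segment) pair, resample the 1 Hz rows into
    # non-overlapping windows of `window_size` seconds, stacking the per-second
    # accelerometer blocks; windows whose labels disagree are mapped to -1 by
    # filter_labels and filtered out.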
df = []
for (user, segment), group in original_df.groupby(["User", "Segment"]):
group.index = group["Time"]
group = group[~group.index.duplicated(keep='first')]
# [:-1] becuase the last row may not necessarily have window_size seconds of data
temp = group["Accelerometer"].resample(str(window_size)+'s', base=group.iloc[0][2].second).apply(lambda x: np.vstack(x.values.tolist()))[:-1]
temp2 = group["Time"].resample(str(window_size)+'s', base=group.iloc[0][2].second).apply(lambda x: x.values.tolist()[0])
temp = pd.concat([temp, temp2], axis=1)[:-1]
if 'Behavior' in original_df.columns:
temp2 = group["Behavior"].resample(str(window_size)+'s', base=group.iloc[0][2].second).apply(lambda x: filter_labels(x.values.tolist()))
temp = pd.concat([temp, temp2], axis=1)[:-1]
# Remove time windows with more than one label
temp = temp[temp["Behavior"] >= 0]
temp["User"] = user
temp["Segment"] = segment
if 'Behavior' in original_df.columns:
temp = temp[["User", "Segment", "Time", "Accelerometer", "Behavior"]]
temp = temp[temp["Behavior"] >= 0]
else:
temp = temp[["User", "Segment", "Time", "Accelerometer"]]
df.append(temp)
return | pd.concat(df) | pandas.concat |
"""Methods for training an agent."""
import os
import sys
import datetime
import pandas as pd
from matplotlib import pyplot as plt
from .setup_env import setup_env
def train(env_id: str, output_dir: str, monitor: bool=False) -> None:
"""
Train an agent to actuate a certain environment.
Args:
env_id: the ID of the environment to play
output_dir: the base directory to store results into
monitor: whether to monitor the operation
Returns:
None
"""
# setup the output directory based on the environment ID and current time
now = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M')
output_dir = '{}/{}/DeepQAgent/{}'.format(output_dir, env_id, now)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('writing results to {}'.format(repr(output_dir)))
weights_file = '{}/weights.h5'.format(output_dir)
# these are long to import and train is only ever called once during
# an execution lifecycle. import here to save early execution time
from src.agents import DeepQAgent
from src.util import BaseCallback
# build the environment
monitor_dir = '{}/monitor_train'.format(output_dir) if monitor else None
env = setup_env(env_id, monitor_dir)
# build the agent
agent = DeepQAgent(env, replay_memory_size=int(7.5e5))
# write some info about the agent's hyperparameters to disk
with open('{}/agent.py'.format(output_dir), 'w') as agent_file:
agent_file.write(repr(agent))
# observe frames to fill the replay memory
try:
agent.observe()
except KeyboardInterrupt:
env.close()
sys.exit(0)
# train the agent
try:
callback = BaseCallback(weights_file)
agent.train(frames_to_play=int(2.5e6), callback=callback)
except KeyboardInterrupt:
print('canceled training')
# save the weights to disk
agent.model.save_weights(weights_file, overwrite=True)
# save the training results
rewards = pd.Series(callback.scores)
losses = pd.Series(callback.losses)
rewards_losses = | pd.concat([rewards, losses], axis=1) | pandas.concat |
# The published output of this file currently lives here:
# http://share.streamlit.io/0.23.0-2EMF1/index.html?id=8hMSF5ZV3Wmbg5sA3UH3gW
import keras
import math
import numpy as np
import pandas as pd
import streamlit as st
from scipy.sparse.linalg import svds
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from streamlit.Chart import Chart
interactive_mode = False
rating_cols = ['user_id', 'item_id', 'rating', 'timestamp']
movie_cols = ['movie_id','movie_title','release_date', 'video_release_date','IMDb_URL','unknown','Action','Adventure','Animation','Childrens','Comedy','Crime','Documentary','Drama','Fantasy','Film-Noir','Horror','Musical','Mystery','Romance ','Sci-Fi','Thriller','War' ,'Western']
user_cols = ['user_id','age','gender','occupation','zip_code']
users = pd.read_csv('../data/ml-100k/u.user', sep='|', names=user_cols, encoding='latin-1')
movies = | pd.read_csv('../data/ml-100k/u.item', sep='|', names=movie_cols, encoding='latin-1') | pandas.read_csv |
from data_handler.graph_class import Graph,wl_labeling
import networkx as nx
#from utils import per_section,indices_to_one_hot
from collections import defaultdict
import numpy as np
import math
import os
from tqdm import tqdm
import pickle
import pandas as pd
#%%
def indices_to_one_hot(number, nb_classes,label_dummy=-1):
"""Convert an iterable of indices to one-hot encoded labels."""
if number==label_dummy:
return np.zeros(nb_classes)
else:
return np.eye(nb_classes)[number]
def per_section(it, is_delimiter=lambda x: x.isspace()):
ret = []
for line in it:
if is_delimiter(line):
if ret:
yield ret # OR ''.join(ret)
ret = []
else:
ret.append(line.rstrip()) # OR ret.append(line)
if ret:
yield ret
def data_streamer(data_path,batchsize_bylabel, selected_labels,balanced_shapes=False,sampling_seed=None,return_idx = False):
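    # Streams `batchsize_bylabel` graphs per selected label from the .npy files stored
    # under data_path/label<k>/; with balanced_shapes=True the per-label budget is split
    # evenly across the distinct graph sizes observed for that label.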
batch_graphs, batch_labels = [],[]
if not (sampling_seed is None):
np.random.seed(sampling_seed)
if return_idx:
batch_idx=[]
if not balanced_shapes:
for label in selected_labels:
files = os.listdir(data_path+'/label%s/'%label)
file_idx = np.random.choice(range(len(files)), size=batchsize_bylabel,replace=False)
for idx in file_idx:
batch_graphs.append(np.load(data_path+'/label%s/'%label+files[idx]))
batch_labels.append(label)
if return_idx:
                    ls = files[idx].split('.')
                    batch_idx.append(int(ls[0][5:]))  # filenames follow the 'graph<id>.npy' pattern used below
if return_idx:
return batch_graphs,batch_labels,batch_idx
else:
return batch_graphs,batch_labels
else:
shapes={}
graphidx_shapes={}
for label in selected_labels:
files = os.listdir(data_path+'/label%s/'%label)
shapes[label]=[]
graphidx_shapes[label]=[]
print('label = ', label)
for filename in tqdm(files):
local_idx = int(filename.split('.')[0][5:])
graphidx_shapes[label].append(local_idx)
shapes[label].append(np.load(data_path+'/label%s/'%label+filename).shape[0])
unique_shapes= np.unique(shapes[label])
sizebylabel = batchsize_bylabel//len(unique_shapes)
for local_shape in unique_shapes:
local_idx_list = np.argwhere(shapes[label]==local_shape)[:,0]
sampled_idx = np.random.choice(local_idx_list, size=sizebylabel, replace=False)
for idx in sampled_idx:
graphidx = graphidx_shapes[label][idx]
batch_graphs.append(np.load(data_path+'/label%s/graph%s.npy'%(label,graphidx)))
batch_labels.append(label)
return batch_graphs,batch_labels
def load_local_data(data_path,name,one_hot=False,attributes=True,use_node_deg=False):
""" Load local datasets - modified version
Parameters
----------
data_path : string
Path to the data. Must link to a folder where all datasets are saved in separate folders
name : string
Name of the dataset to load.
Choices=['mutag','ptc','nci1','imdb-b','imdb-m','enzymes','protein','protein_notfull','bzr','cox2','synthetic','aids','cuneiform']
one_hot : integer
If discrete attributes must be one hotted it must be the number of unique values.
attributes : bool, optional
For dataset with both continuous and discrete attributes.
If True it uses the continuous attributes (corresponding to "Node Attr." in [5])
use_node_deg : bool, optional
Wether to use the node degree instead of original labels.
Returns
-------
X : array
array of Graph objects created from the dataset
y : array
classes of each graph
References
----------
[5] <NAME> and <NAME> and <NAME> and <NAME> and <NAME>
"Benchmark Data Sets for Graph Kernels"
"""
name_to_path_discretefeatures={'mutag':data_path+'/MUTAG_2/',
'ptc':data_path+'/PTC_MR/',
'triangles':data_path+'/TRIANGLES/'}
name_to_path_realfeatures={'enzymes':data_path+'/ENZYMES_2/',
'protein':data_path+'/PROTEINS_full/',
'protein_notfull':data_path+'/PROTEINS/',
'bzr':data_path+'/BZR/',
'cox2':data_path+'/COX2/'}
name_to_rawnames={'mutag':'MUTAG', 'ptc':'PTC_MR','triangles':'TRIANGLES',
'enzymes':'ENZYMES','protein':'PROTEINS_full','protein_notfull':'PROTEINS',
'bzr':'BZR','cox2':'COX2',
'imdb-b':'IMDB-BINARY', 'imdb-m':'IMDB-MULTI','reddit':'REDDIT-BINARY','collab':'COLLAB'}
if name in ['mutag','ptc','triangles']:
dataset = build_dataset_discretefeatures(name_to_rawnames[name],
name_to_path_discretefeatures[name],
one_hot=one_hot)
elif name in ['enzymes','protein', 'protein_notfull','bzr','cox2']:
dataset = build_dataset_realfeatures(name_to_rawnames[name], name_to_path_realfeatures[name],
type_attr='real',use_node_deg=use_node_deg)
elif name in ['imdb-b','imdb-m','reddit', 'collab']:
rawname = name_to_rawnames[name]
dataset = build_dataset_withoutfeatures(rawname, data_path+'/%s/'%rawname,use_node_deg= use_node_deg)
else:
        raise ValueError('unknown dataset')
X,y=zip(*dataset)
return np.array(X),np.array(y)
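# Example usage (hypothetical paths -- the dataset folders listed above must exist
# under data_path):
#   X, y = load_local_data('./data', 'mutag', one_hot=True)
#   # X is an array of Graph objects, y the corresponding class labels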
def build_noisy_circular_graph(N=20,mu=0,sigma=0.3,with_noise=False,structure_noise=False,p=None):
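    # Builds a noisy circle: nodes 0..N form a cycle, each carrying the attribute
    # sin(2*pi*i/N) (optionally with additive Gaussian noise); when structure_noise
    # is set, a chord to the node two steps ahead is added with probability 1/p.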
g=Graph()
g.add_nodes(list(range(N)))
for i in range(N):
noise=float(np.random.normal(mu,sigma,1))
if with_noise:
g.add_one_attribute(i,math.sin((2*i*math.pi/N))+noise)
else:
g.add_one_attribute(i,math.sin(2*i*math.pi/N))
g.add_edge((i,i+1))
if structure_noise:
randomint=np.random.randint(0,p)
if randomint==0:
if i<=N-3:
g.add_edge((i,i+2))
if i==N-2:
g.add_edge((i,0))
if i==N-1:
g.add_edge((i,1))
g.add_edge((N,0))
noise=float(np.random.normal(mu,sigma,1))
if with_noise:
g.add_one_attribute(N,math.sin((2*N*math.pi/N))+noise)
else:
g.add_one_attribute(N,math.sin(2*N*math.pi/N))
return g
def load_largegraphs(data_path, dataset_name,undirected=True):
abspath = os.path.abspath('./')
name_to_file = {'EU':'eu-email.p',
'village':'India_database.p',
'amazon':'amazon.p',
'wikicats':'wikicats.p'}
database = pickle.load(open(abspath+data_path+name_to_file[dataset_name],'rb'))
if not undirected:# directed graphs we could experimentally switch to undirected graphs
assert dataset_name in ['EU','wikicats']
if dataset_name in ['EU','wikicats']:
G = nx.to_numpy_array(database['G'])
node_labels = database['labels']
else:
            raise ValueError('unknown dataset name')
else: #undirected graphs
assert dataset_name in ['EU','amazon','wikicats', 'village']
if dataset_name in ['EU','amazon', 'wikicats']:
G = nx.to_numpy_array(database['G'].to_undirected())
node_labels = database['labels']
elif dataset_name in ['village']:
node_labels = database['label']
num_nodes = len(node_labels)
G_ = nx.Graph()
for i in range(num_nodes):
G_.add_node(i)
for edge in database['edges']:
G_.add_edge(edge[0], edge[1])
G= nx.adjacency_matrix(G_).toarray()
else:
            raise ValueError('unknown dataset name')
return G, node_labels
#%%
def histog(X,bins=10):
node_length=[]
for graph in X:
node_length.append(len(graph.nodes()))
return np.array(node_length),{'histo':np.histogram(np.array(node_length),bins=bins),'med':np.median(np.array(node_length))
,'max':np.max(np.array(node_length)),'min':np.min(np.array(node_length))}
def node_labels_dic(path,name):
node_dic=dict()
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
node_dic[k]=int(elt)
k=k+1
return node_dic
def node_attr_dic(path,name):
node_dic=dict()
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
node_dic[k]=[float(x) for x in elt.split(',')]
k=k+1
return node_dic
def graph_label_list(path,name,real=False):
graphs=[]
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
if real:
graphs.append((k,float(elt)))
else:
graphs.append((k,int(elt)))
k=k+1
return graphs
def graph_indicator(path,name):
data_dict = defaultdict(list)
with open(path+name) as f:
sections = list(per_section(f))
k=1
for elt in sections[0]:
data_dict[int(elt)].append(k)
k=k+1
return data_dict
def compute_adjency(path,name):
adjency= defaultdict(list)
with open(path+name) as f:
sections = list(per_section(f))
for elt in sections[0]:
adjency[int(elt.split(',')[0])].append(int(elt.split(',')[1]))
return adjency
def all_connected(X):
a=[]
for graph in X:
a.append(nx.is_connected(graph.nx_graph))
return np.all(a)
#%% TO FACTORIZE !!!!!!!!!!!
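# The dataset builders below parse the standard TU-benchmark text layout:
#   <NAME>_A.txt                comma-separated edge list
#   <NAME>_graph_indicator.txt  node id -> graph id mapping
#   <NAME>_graph_labels.txt     one class label per graph
#   <NAME>_node_labels.txt / <NAME>_node_attributes.txt  discrete / real node features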
def build_dataset_discretefeatures(dataset_name,path,one_hot=False):
assert dataset_name in ['MUTAG','PTC_MR','TRIANGLES']
name_to_ncategories={'MUTAG':7, 'PTC_MR':18}
n_categories = name_to_ncategories[dataset_name]
graphs=graph_label_list(path,'%s_graph_labels.txt'%dataset_name)
adjency=compute_adjency(path,'%s_A.txt'%dataset_name)
data_dict=graph_indicator(path,'%s_graph_indicator.txt'%dataset_name)
node_dic=node_labels_dic(path,'%s_node_labels.txt'%dataset_name)
data=[]
for i in graphs:
g=Graph()
for node in data_dict[i[0]]:
g.name=i[0]
g.add_vertex(node)
if one_hot:
attr=indices_to_one_hot(node_dic[node],n_categories)
g.add_one_attribute(node,attr)
else:
g.add_one_attribute(node,node_dic[node])
for node2 in adjency[node]:
g.add_edge((node,node2))
data.append((g,i[1]))
return data
def build_dataset_realfeatures(dataset_name,path,type_attr='label',use_node_deg=False):
assert dataset_name in ['PROTEINS_full','PROTEINS','ENZYMES','BZR','COX2']
if type_attr=='label':
node_dic=node_labels_dic(path,'%s_node_labels.txt'%dataset_name)
if type_attr=='real':
node_dic=node_attr_dic(path,'%s_node_attributes.txt'%dataset_name)
graphs=graph_label_list(path,'%s_graph_labels.txt'%dataset_name)
adjency=compute_adjency(path,'%s_A.txt'%dataset_name)
data_dict=graph_indicator(path,'%s_graph_indicator.txt'%dataset_name)
data=[]
for i in graphs:
g=Graph()
for node in data_dict[i[0]]:
g.name=i[0]
g.add_vertex(node)
if not use_node_deg:
g.add_one_attribute(node,node_dic[node])
for node2 in adjency[node]:
g.add_edge((node,node2))
if use_node_deg:
node_degree_dict=dict(g.nx_graph.degree())
normalized_node_degree_dict={k:v/len(g.nx_graph.nodes()) for k,v in node_degree_dict.items() }
nx.set_node_attributes(g.nx_graph,normalized_node_degree_dict,'attr_name')
data.append((g,i[1]))
return data
def build_dataset_withoutfeatures(dataset_name, path, use_node_deg=False):
assert dataset_name in ['IMDB-MULTI','IMDB-BINARY','REDDIT-BINARY','COLLAB']
graphs=graph_label_list(path,'%s_graph_labels.txt'%dataset_name)
adjency=compute_adjency(path,'%s_A.txt'%dataset_name)
data_dict=graph_indicator(path,'%s_graph_indicator.txt'%dataset_name)
data=[]
for i in tqdm(graphs,desc='loading graphs'):
g=Graph()
for node in data_dict[i[0]]:
g.name=i[0]
g.add_vertex(node)
#g.add_one_attribute(node,node_dic[node])
for node2 in adjency[node]:
g.add_edge((node,node2))
if use_node_deg:
node_degree_dict=dict(g.nx_graph.degree())
normalized_node_degree_dict={k:v/len(g.nx_graph.nodes()) for k,v in node_degree_dict.items() }
nx.set_node_attributes(g.nx_graph,normalized_node_degree_dict,'attr_name')
data.append((g,i[1]))
return data
#%% READ EXPERIMENT RESULTS
def reader_results_FGWdictionary(dataset_name:str,
str_selection:list,
excluded_str_selection:list,
unmixing_validation:bool=False,
parameters:list =['Ntarget','lrC','lrF','init_mode_graph','batch_size','algo_seed','l2_reg'],
aggreg_params='algo_seed',
compute_statistics=True, target_resfile= 'res_clustering.csv', target_unmixingsfile= 'unmixings.npy'):
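    # Scans ../results/<dataset_name>/ for run folders whose names contain every string
    # in str_selection (and none in excluded_str_selection), loads each run's settings
    # and clustering results, keeps the state with the lowest mean loss, and records its
    # 'RI' score (presumably a Rand index) plus sparsity statistics of the unmixings.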
abs_path = os.path.abspath('../results/')
full_path ='%s/%s/'%(abs_path,dataset_name)
list_experiments = []
res = {}
for p in parameters:
res[p]=[]
res['loss'], res['RI'], res['best_RI']= [],[],[]
res['init_features']=[]
if compute_statistics:
res['involved_components']=[]
res['mean_components']=[]
res['min_components']=[]
res['max_components']=[]
for subrepo in os.listdir(full_path):
if np.all([str_ in subrepo for str_ in str_selection]) and (not np.any([str_ in subrepo for str_ in excluded_str_selection])):
local_path='%s/%s/'%(full_path,subrepo)
try:
# load necessary files
settings = pd.read_csv(local_path+'/settings')
if not ('_seed' in subrepo):
if not unmixing_validation:
local_res = pd.read_csv(local_path+'/res_clustering.csv')
if compute_statistics:
local_OT = pickle.load(open(local_path+'/OT_unmixings.pkl','rb'))
else:
local_res=pd.read_csv(local_path +'/res_clustering_100seeds.csv')
if compute_statistics:
local_OT = pickle.load(open(local_path+'/OT_unmixings_100seeds.pkl','rb'))
#print('local_res:', local_res)
#complete the summary dictionary
for p in parameters:
if p =='alpha':
for x in subrepo.split('_'):
if 'alpha' in x:
res['alpha'].append(np.float(x[5:]))
elif p in ['gamma_entropy','lambda_reg']:
if p in settings.keys():
res[p].append(settings[p].iloc[0])
else:
res[p].append(0)
else:
res[p].append(settings[p].iloc[0])
best_idx_dist = np.argmin(local_res['loss_mean'].values)
res['loss'].append(local_res['loss_mean'].values[best_idx_dist])
res['RI'].append(local_res['RI'].values[best_idx_dist])
res['best_RI'].append(np.max(local_res['RI'].values))
if compute_statistics:
unmixings = np.array([np.sum(T,axis=0) for T in local_OT[best_idx_dist]])
sums=np.sum(unmixings,axis=0)
res['involved_components'].append(np.sum(sums>10**(-15)))
count_components = [np.sum(x>10**(-15)) for x in unmixings]
res['mean_components'].append(np.mean(count_components))
res['max_components'].append(np.max(count_components))
res['min_components'].append(np.min(count_components))
if 'Finitkmeans' in subrepo:
res['init_features'].append('kmeans')
elif 'Finitrange' in subrepo:
res['init_features'].append('range')
else:
res['init_features'].append('random')
else:# we changed the storage method because it was too memory intensive
if not unmixing_validation:
local_res = pd.read_csv(local_path+target_resfile)
if compute_statistics:
unmixings = np.load(local_path+target_unmixingsfile)
else:
local_res=pd.read_csv(local_path +'/res_clustering_100seeds.csv')
if compute_statistics:
unmixings = np.load(local_path+'/unmixings_100seeds.npy')
#print('local_res:', local_res)
#complete the summary dictionary
for p in parameters:
if p =='alpha':
for x in subrepo.split('_'):
if 'alpha' in x:
                                    res['alpha'].append(float(x[5:]))
elif p=='use_warmstart':
if not p in settings.keys():
res[p].append(False)
else:
res[p].append(settings[p].iloc[0])
elif p in ['gamma_entropy','lambda_reg']:
if p in settings.keys():
res[p].append(settings[p].iloc[0])
else:
res[p].append(0)
else:
res[p].append(settings[p].iloc[0])
best_idx_dist = np.argmin(local_res['loss_mean'].values)
res['loss'].append(local_res['loss_mean'].values[best_idx_dist])
res['RI'].append(local_res['RI'].values[best_idx_dist])
res['best_RI'].append(np.max(local_res['RI'].values))
if compute_statistics:
sums=np.sum(unmixings[best_idx_dist],axis=0)
res['involved_components'].append(np.sum(sums>10**(-15)))
count_components = [np.sum(x>10**(-15)) for x in unmixings[best_idx_dist]]
res['mean_components'].append(np.mean(count_components))
res['max_components'].append(np.max(count_components))
res['min_components'].append(np.min(count_components))
if 'Finitkmeans' in subrepo:
res['init_features'].append('kmeans')
elif 'Finitrange' in subrepo:
res['init_features'].append('range')
else:
res['init_features'].append('random')
list_experiments.append(subrepo)
except:
continue
for key in res.keys():
print('key: %s / len res: %s'%(key,len(res[key])))
stacked_df = pd.DataFrame(res)
print('stacked_df built ! shape: ', stacked_df.shape)
aggreg_dict = {}
fixed_params= []
exception_keys = ['RI','best_RI','loss']
if compute_statistics:
exception_keys+=['max_components', 'mean_components', 'min_components','involved_components']
for key in res.keys():
if not key in exception_keys+[aggreg_params]:
aggreg_dict[key]=list(np.unique(res[key]))
fixed_params.append(key)
print('fixed params:', fixed_params)
aggreg_df_instantiated = False
idx_to_explore = list(range(stacked_df.shape[0]))
first_key = fixed_params[0]
count =0
nan_count=0
mean_mapper = {}
std_mapper= {}
for key in exception_keys:
mean_mapper[key]= 'mean_%s'%key
std_mapper[key]= 'std_%s'%key
while idx_to_explore !=[]:
if count ==0:
print('len idx_to_explore:', len(idx_to_explore))
print('selected_idx:', idx_to_explore[0])
selected_exp = stacked_df.iloc[idx_to_explore[0]]
sub_df = stacked_df[stacked_df[first_key]==selected_exp[first_key]]
for param in fixed_params[1:]:
sub_df= sub_df[sub_df[param]==selected_exp[param]]
if count ==0:
print('param: %s / sub_df shape: %s'%(param,sub_df.shape))
if not aggreg_df_instantiated:
mean_aggreg_df = sub_df[exception_keys].mean(axis=0).to_frame().T
std_aggreg_df = sub_df[exception_keys].std(axis=0).to_frame().T
mean_aggreg_df.rename(mean_mapper,axis=1,inplace=True)
std_aggreg_df.rename(std_mapper,axis=1,inplace=True)
for key in fixed_params:
mean_aggreg_df[key] = sub_df[key].iloc[0]
std_aggreg_df[key] = sub_df[key].iloc[0]
#print('aggreg_df(n_exp=%s) - shape :'%n_exp,aggreg_df.shape)
aggreg_df_instantiated = True
else:
mean_local_df = sub_df[exception_keys].mean(axis=0).to_frame().T
std_local_df = sub_df[exception_keys].std(axis=0).to_frame().T
mean_local_df.rename(mean_mapper,axis=1,inplace=True)
std_local_df.rename(std_mapper,axis=1,inplace=True)
for key in fixed_params:
try:
mean_local_df[key] = sub_df[key].iloc[0]
std_local_df[key] = sub_df[key].iloc[0]
except:
nan_count+=1
mean_local_df[key] = np.nan
std_local_df[key] = np.nan
#raise 'empty df error'
continue
mean_aggreg_df = pd.concat([mean_aggreg_df.copy(),mean_local_df.copy()])
std_aggreg_df = pd.concat([std_aggreg_df.copy(),std_local_df.copy()])
if count ==0:
print('sub_df.index:', sub_df.index)
for idx in sub_df.index.to_list():
if count ==0:
print('removed_idx:', idx)
idx_to_explore.remove(idx)
count+=1
print('mean_aggreg_df: %s / std_aggreg_df: %s'%(mean_aggreg_df.shape,std_aggreg_df.shape))
    aggreg_df = pd.merge(mean_aggreg_df, std_aggreg_df)
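# Illustrative sketch (not from the original file): the aggregation loop above amounts to
# grouping runs that share every fixed hyper-parameter and merging their mean/std statistics.
# With pandas this can be written directly via groupby/agg; the function name is hypothetical.
def _aggregate_runs_sketch(stacked_df, fixed_params, metric_keys=('RI', 'best_RI', 'loss')):
    grouped = stacked_df.groupby(list(fixed_params))
    mean_df = grouped[list(metric_keys)].mean().add_prefix('mean_')
    std_df = grouped[list(metric_keys)].std().add_prefix('std_')
    # merge on the shared group index, then restore the fixed parameters as columns
    return pd.merge(mean_df, std_df, left_index=True, right_index=True).reset_index()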
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import tempfile
import time
from collections import OrderedDict
from datetime import datetime
from string import printable
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet
except ImportError: # pragma: no cover
fastparquet = None
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
from .... import tensor as mt
from .... import dataframe as md
from ....config import option_context
from ....tests.core import require_cudf, require_ray
from ....utils import arrow_array_to_objects, lazy_import, pd_release_version
from ..dataframe import from_pandas as from_pandas_df
from ..series import from_pandas as from_pandas_series
from ..index import from_pandas as from_pandas_index, from_tileable
from ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables
from ..from_records import from_records
ray = lazy_import("ray")
_date_range_use_inclusive = pd_release_version[:2] >= (1, 4)
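# Hedged helper sketch (not part of the original test module): the flag above presumably lets
# tests pick the keyword spelling accepted by the installed pandas, since pandas 1.4 renamed
# date_range's `closed` argument to `inclusive`.
def _date_range_kwargs():
    return {"inclusive": "both"} if _date_range_use_inclusive else {}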
def test_from_pandas_dataframe_execution(setup):
# test empty DataFrame
pdf = pd.DataFrame()
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(columns=list("ab"))
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(
np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)]
)
df = from_pandas_df(pdf, chunk_size=(13, 21))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
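# Hedged illustration (not an original test): chunk_size controls how the pandas object is cut
# along each axis, so a (20, 30) frame with chunk_size=(13, 21) is split into a 2 x 2 chunk grid
# (rows 13 + 7, columns 21 + 9) while round-tripping to the same pandas result.
def _demo_chunked_roundtrip():
    pdf = pd.DataFrame(np.random.rand(20, 30))
    df = from_pandas_df(pdf, chunk_size=(13, 21))
    pd.testing.assert_frame_equal(df.execute().fetch(), pdf)
    return df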
def test_from_pandas_series_execution(setup):
# test empty Series
ps = pd.Series(name="a")
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = from_pandas_series(ps)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
ps = pd.Series(
np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name="a"
)
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
def test_from_pandas_index_execution(setup):
pd_index = pd.timedelta_range("1 days", periods=10)
index = from_pandas_index(pd_index, chunk_size=7)
result = index.execute().fetch()
pd.testing.assert_index_equal(pd_index, result)
def test_index_execution(setup):
rs = np.random.RandomState(0)
pdf = pd.DataFrame(
rs.rand(20, 10),
index=np.arange(20, 0, -1),
columns=["a" + str(i) for i in range(10)],
)
df = from_pandas_df(pdf, chunk_size=13)
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf.index)
result = df.columns.execute().fetch()
pd.testing.assert_index_equal(result, pdf.columns)
# df has unknown chunk shape on axis 0
df = df[df.a1 < 0.5]
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf[pdf.a1 < 0.5].index)
s = pd.Series(pdf["a1"], index=pd.RangeIndex(20))
series = from_pandas_series(s, chunk_size=13)
# test series.index which has value
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
s = pdf["a2"]
series = from_pandas_series(s, chunk_size=13)
# test series.index
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
# test tensor
raw = rs.random(20)
t = mt.tensor(raw, chunk_size=13)
result = from_tileable(t).execute().fetch()
pd.testing.assert_index_equal(result, pd.Index(raw))
def test_initializer_execution(setup):
arr = np.random.rand(20, 30)
pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])
df = md.DataFrame(pdf, chunk_size=(15, 10))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
df = md.DataFrame(arr, index=md.date_range("2020-1-1", periods=20))
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result, pd.DataFrame(arr, index=pd.date_range("2020-1-1", periods=20))
)
df = md.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=md.date_range("1/1/2010", periods=6, freq="D"),
)
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=pd.date_range("1/1/2010", periods=6, freq="D"),
),
)
s = np.random.rand(20)
ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name="a")
series = md.Series(ps, chunk_size=7)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = md.Series(s, index=md.date_range("2020-1-1", periods=20))
result = series.execute().fetch()
pd.testing.assert_series_equal(
result, pd.Series(s, index=pd.date_range("2020-1-1", periods=20))
)
pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
index = md.Index(md.Index(pi))
result = index.execute().fetch()
pd.testing.assert_index_equal(pi, result)
def test_index_only(setup):
df = md.DataFrame(index=[1, 2, 3])
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=[1, 2, 3])
pd.testing.assert_series_equal(s.execute().fetch(), pd.Series(index=[1, 2, 3]))
df = md.DataFrame(index=md.Index([1, 2, 3]))
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=md.Index([1, 2, 3]), dtype=object)
pd.testing.assert_series_equal(
s.execute().fetch(), pd.Series(index=[1, 2, 3], dtype=object)
)
def test_series_from_tensor(setup):
data = np.random.rand(10)
series = md.Series(mt.tensor(data), name="a")
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data, name="a"))
series = md.Series(mt.tensor(data, chunk_size=3))
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data))
series = md.Series(mt.ones((10,), chunk_size=4))
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(np.ones(10)),
)
index_data = np.random.rand(10)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=mt.tensor(index_data, chunk_size=4),
)
pd.testing.assert_series_equal(
series.execute().fetch(), pd.Series(data, name="a", index=index_data)
)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=md.date_range("2020-1-1", periods=10),
)
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(data, name="a", index=pd.date_range("2020-1-1", periods=10)),
)
def test_from_tensor_execution(setup):
tensor = mt.random.rand(10, 10, chunk_size=5)
df = dataframe_from_tensor(tensor)
tensor_res = tensor.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
df_result = df.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.RangeIndex(0, 10))
pd.testing.assert_index_equal(df_result.columns, pd.RangeIndex(0, 10))
pd.testing.assert_frame_equal(df_result, pdf_expected)
# test from tensor with unknown shape
tensor2 = tensor[tensor[:, 0] < 0.9]
df = dataframe_from_tensor(tensor2)
df_result = df.execute().fetch()
tensor_res = tensor2.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
pd.testing.assert_frame_equal(df_result.reset_index(drop=True), pdf_expected)
# test converted with specified index_value and columns
tensor2 = mt.random.rand(2, 2, chunk_size=1)
df2 = dataframe_from_tensor(
tensor2, index=pd.Index(["a", "b"]), columns=pd.Index([3, 4])
)
df_result = df2.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.Index(["a", "b"]))
pd.testing.assert_index_equal(df_result.columns, pd.Index([3, 4]))
# test converted from 1-d tensor
tensor3 = mt.array([1, 2, 3])
df3 = dataframe_from_tensor(tensor3)
result3 = df3.execute().fetch()
pdf_expected = pd.DataFrame(np.array([1, 2, 3]))
pd.testing.assert_frame_equal(pdf_expected, result3)
# test converted from identical chunks
tensor4 = mt.ones((10, 10), chunk_size=3)
df4 = dataframe_from_tensor(tensor4)
result4 = df4.execute().fetch()
pdf_expected = pd.DataFrame(tensor4.execute().fetch())
pd.testing.assert_frame_equal(pdf_expected, result4)
# from tensor with given index
tensor5 = mt.ones((10, 10), chunk_size=3)
df5 = dataframe_from_tensor(tensor5, index=np.arange(0, 20, 2))
result5 = df5.execute().fetch()
pdf_expected = pd.DataFrame(tensor5.execute().fetch(), index=np.arange(0, 20, 2))
pd.testing.assert_frame_equal(pdf_expected, result5)
# from tensor with given index that is a tensor
raw7 = np.random.rand(10, 10)
tensor7 = mt.tensor(raw7, chunk_size=3)
index_raw7 = np.random.rand(10)
index7 = mt.tensor(index_raw7, chunk_size=4)
df7 = dataframe_from_tensor(tensor7, index=index7)
result7 = df7.execute().fetch()
pdf_expected = pd.DataFrame(raw7, index=index_raw7)
pd.testing.assert_frame_equal(pdf_expected, result7)
# from tensor with given index is a md.Index
raw10 = np.random.rand(10, 10)
tensor10 = mt.tensor(raw10, chunk_size=3)
index10 = md.date_range("2020-1-1", periods=10, chunk_size=3)
df10 = dataframe_from_tensor(tensor10, index=index10)
result10 = df10.execute().fetch()
pdf_expected = pd.DataFrame(raw10, index=pd.date_range("2020-1-1", periods=10))
pd.testing.assert_frame_equal(pdf_expected, result10)
# from tensor with given columns
tensor6 = mt.ones((10, 10), chunk_size=3)
df6 = dataframe_from_tensor(tensor6, columns=list("abcdefghij"))
result6 = df6.execute().fetch()
pdf_expected = pd.DataFrame(tensor6.execute().fetch(), columns=list("abcdefghij"))
pd.testing.assert_frame_equal(pdf_expected, result6)
# from 1d tensors
raws8 = [
("a", np.random.rand(8)),
("b", np.random.randint(10, size=8)),
("c", ["".join(np.random.choice(list(printable), size=6)) for _ in range(8)]),
]
tensors8 = OrderedDict((r[0], mt.tensor(r[1], chunk_size=3)) for r in raws8)
raws8.append(("d", 1))
raws8.append(("e", pd.date_range("2020-1-1", periods=8)))
tensors8["d"] = 1
tensors8["e"] = raws8[-1][1]
df8 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8])
result = df8.execute().fetch()
pdf_expected = pd.DataFrame(OrderedDict(raws8))
pd.testing.assert_frame_equal(result, pdf_expected)
# from 1d tensors and specify index with a tensor
index_raw9 = np.random.rand(8)
index9 = mt.tensor(index_raw9, chunk_size=4)
df9 = dataframe_from_1d_tileables(
tensors8, columns=[r[0] for r in raws8], index=index9
)
result = df9.execute().fetch()
pdf_expected = pd.DataFrame(OrderedDict(raws8), index=index_raw9)
pd.testing.assert_frame_equal(result, pdf_expected)
# from 1d tensors and specify index
df11 = dataframe_from_1d_tileables(
tensors8,
columns=[r[0] for r in raws8],
index=md.date_range("2020-1-1", periods=8),
)
result = df11.execute().fetch()
pdf_expected = pd.DataFrame(
OrderedDict(raws8), index=pd.date_range("2020-1-1", periods=8)
)
pd.testing.assert_frame_equal(result, pdf_expected)
def test_from_records_execution(setup):
dtype = np.dtype([("x", "int"), ("y", "double"), ("z", "<U16")])
ndarr = np.ones((10,), dtype=dtype)
pdf_expected = pd.DataFrame.from_records(ndarr, index=pd.RangeIndex(10))
# from structured array of mars
tensor = mt.ones((10,), dtype=dtype, chunk_size=3)
df1 = from_records(tensor)
df1_result = df1.execute().fetch()
pd.testing.assert_frame_equal(df1_result, pdf_expected)
# from structured array of numpy
df2 = from_records(ndarr)
df2_result = df2.execute().fetch()
pd.testing.assert_frame_equal(df2_result, pdf_expected)
def test_read_csv_execution(setup):
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),
columns=["a", "b", "c"],
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
r = md.read_csv(file_path, index_col=0)
mdf = r.execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
# size_res = self.executor.execute_dataframe(r, mock=True)
# assert sum(s[0] for s in size_res) == os.stat(file_path).st_size
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=10).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
mdf = md.read_csv(file_path, index_col=0, nrows=1).execute().fetch()
pd.testing.assert_frame_equal(df[:1], mdf)
# test names and usecols
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),
columns=["a", "b", "c"],
)
df.to_csv(file_path, index=False)
mdf = md.read_csv(file_path, usecols=["c", "b"]).execute().fetch()
pd.testing.assert_frame_equal(pd.read_csv(file_path, usecols=["c", "b"]), mdf)
mdf = (
md.read_csv(file_path, names=["a", "b", "c"], usecols=["c", "b"])
.execute()
.fetch()
)
pd.testing.assert_frame_equal(
pd.read_csv(file_path, names=["a", "b", "c"], usecols=["c", "b"]), mdf
)
mdf = (
md.read_csv(file_path, names=["a", "b", "c"], usecols=["a", "c"])
.execute()
.fetch()
)
pd.testing.assert_frame_equal(
pd.read_csv(file_path, names=["a", "b", "c"], usecols=["a", "c"]), mdf
)
mdf = md.read_csv(file_path, usecols=["a", "c"]).execute().fetch()
pd.testing.assert_frame_equal(pd.read_csv(file_path, usecols=["a", "c"]), mdf)
# test sep
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
)
df.to_csv(file_path, sep=";")
pdf = pd.read_csv(file_path, sep=";", index_col=0)
mdf = md.read_csv(file_path, sep=";", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = (
md.read_csv(file_path, sep=";", index_col=0, chunk_bytes=10)
.execute()
.fetch()
)
pd.testing.assert_frame_equal(pdf, mdf2)
# test missing value
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"c1": [np.nan, "a", "b", "c"],
"c2": [1, 2, 3, np.nan],
"c3": [np.nan, np.nan, 3.4, 2.2],
}
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=12).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
index = pd.date_range(start="1/1/2018", periods=100)
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
},
index=index,
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=100).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
# test nan
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
}
)
df.iloc[20:, :] = pd.NA
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0, head_lines=10, chunk_bytes=200)
result = mdf.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
# dtypes is inferred as expected
pd.testing.assert_series_equal(
mdf.dtypes, pd.Series(["float64", "object", "int64"], index=df.columns)
)
# test compression
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.gzip")
index = pd.date_range(start="1/1/2018", periods=100)
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
},
index=index,
)
df.to_csv(file_path, compression="gzip")
pdf = pd.read_csv(file_path, compression="gzip", index_col=0)
mdf = md.read_csv(file_path, compression="gzip", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = (
md.read_csv(file_path, compression="gzip", index_col=0, chunk_bytes="1k")
.execute()
.fetch()
)
pd.testing.assert_frame_equal(pdf, mdf2)
# test multiple files
for merge_small_file_option in [{"n_sample_file": 1}, None]:
with tempfile.TemporaryDirectory() as tempdir:
df = pd.DataFrame(np.random.rand(300, 3), columns=["a", "b", "c"])
file_paths = [os.path.join(tempdir, f"test{i}.csv") for i in range(3)]
df[:100].to_csv(file_paths[0])
df[100:200].to_csv(file_paths[1])
df[200:].to_csv(file_paths[2])
mdf = (
md.read_csv(
file_paths,
index_col=0,
merge_small_file_options=merge_small_file_option,
)
.execute()
.fetch()
)
            pd.testing.assert_frame_equal(df, mdf)
import matplotlib
import numpy as np
import pandas as pd
from singlecellmultiomics.utils import is_main_chromosome, get_contig_list_from_fasta
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pysam
import seaborn as sns
from matplotlib.patches import Circle
from itertools import product
import collections
import string
import math
# Define chromosome order:
def sort_chromosome_names(l):
chrom_values = []
for chrom in l:
chrom_value = None
chrom = chrom.replace('chr','').upper()
if chrom == 'X':
chrom_value = 99
elif chrom == 'Y':
chrom_value = 100
elif chrom == 'M' or chrom=='MT':
chrom_value = 101
elif chrom == 'EBV':
chrom_value = 102
elif chrom=='MISC_ALT_CONTIGS_SCMO':
chrom_value=999
else:
try:
chrom_value = int(chrom)
except Exception as e:
chrom_value = 999 + sum((ord(x) for x in chrom))
chrom_values.append(chrom_value)
indices = sorted(range(len(chrom_values)),key=lambda x:chrom_values[x])
return [l[idx] for idx in indices]
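# Illustrative sanity check (not part of the original module): numeric contigs sort numerically,
# X/Y/MT come next, and unrecognised contigs are pushed to the end.
def _demo_sort_chromosome_names():
    ordered = sort_chromosome_names(['chrX', 'chr10', 'chr2', 'chrM', 'alt_scaffold_1'])
    assert ordered == ['chr2', 'chr10', 'chrX', 'chrM', 'alt_scaffold_1']
    return ordered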
class GenomicPlot():
def __init__(self, ref_path, contigs=None, ignore_contigs=None):
"""
        Initialise genomic plot
        ref_path(str or pysam.FastaFile) : Path or handle to the reference FASTA
        contigs(list) : Contigs to plot; defaults to the main chromosomes found in the reference
        ignore_contigs(list) : Contigs to exclude when contigs is not supplied
"""
if contigs is None:
self.contigs = sort_chromosome_names(list(filter(lambda x: is_main_chromosome(x) and (ignore_contigs is None or x not in ignore_contigs) , get_contig_list_from_fasta(ref_path))))
else:
self.contigs = contigs
# Obtain the lengths:
if type(ref_path) is str:
with pysam.FastaFile(ref_path) as reference:
self.lengths = {r:l for r,l in zip(reference.references,reference.lengths) if r in self.contigs}
else:
self.lengths = {r:l for r,l in zip(ref_path.references,ref_path.lengths) if r in self.contigs}
self.total_bp = sum(self.lengths.values())
# Prune contigs with no length:
self.contigs = [contig for contig in self.contigs if contig in self.lengths]
def cn_heatmap(self, df,cell_font_size=3, max_cn=4, method='ward', cmap='bwr', yticklabels=True,
figsize=(15,20), xlabel = 'Contigs', ylabel='Cells', **kwargs ):
"""
Create a heatmap from a copy number matrix
        df: Copy number DataFrame with cells/samples as rows and a triple column index ('contig', start, end)
        cell_font_size (int): Font size of the cell labels
        max_cn (int) : The dataframe is clipped to this value (maximum copy number shown)
        method (str) : Linkage method used for hierarchical clustering, passed to seaborn.clustermap
        cmap (str) : Colormap used
        figsize (tuple) : Size of the figure
        xlabel (str) : Label for the x-axis, by default this is Contigs
        ylabel (str) : Label for the y-axis, by default this is Cells
        **kwargs : Arguments which will be passed to seaborn.clustermap
"""
allelic_mode = len(df.columns[0])==4
if allelic_mode:
            alleles = [allele for allele in df.columns.get_level_values(0).unique() if not pd.isna(allele)]