prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
---|---|---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 10 16:45:01 2017
@author: Isaac
"""
def make():
import pandas as pd
import random
import numpy as np
from datetime import datetime
from numpy import genfromtxt
from time import time
from sqlalchemy import Column, Integer, Float, Date, String, VARCHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
stadiums = pd.read_csv('dataSeed/stadiums.csv')
locations = [stadiums['City'][i].strip() + ', ' + stadiums['Country'][i].strip() for i in range(len(stadiums))]
stadiums['Team'] = stadiums['Team'].apply(lambda x: x.strip())
stadiums['Location'] = locations
stadiums_filtered = stadiums[['Stadium', 'Location']]
stadiums_filtered.columns = ['name', 'location']
countriesPopulation = pd.read_csv('dataSeed/countriesPopulation.csv')
playersData = | pd.read_csv('dataSeed/players.csv') | pandas.read_csv |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = StataReader(path).VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
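# i.e. the 27 Stata missing-value codes '.', '.a', '.b', ..., '.z'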
for t in types:
offset = valid_range[t][1]
for i in range(0,27):
val = StataMissingValue(offset+1+i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for doubles
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data,columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = | read_stata(self.dta17_117, convert_missing=True) | pandas.io.stata.read_stata |
from project import logger
from flask_mongoengine import ValidationError
from mongoengine import MultipleObjectsReturned, DoesNotExist
import pandas as pd
def get_user(id_, username=None):
from project.auth.models import User
user_obj = None
try:
if username:
user_obj = User.objects.get(username=username)
elif id_:
user_obj = User.objects.get(id=id_)
except MultipleObjectsReturned:
user_obj = User.objects(username=username)[0]
except DoesNotExist:
logger.warning("user or id does not exist in db")
return user_obj
def account_list_to_df(accts):
category = list()
type_ = list()
dates = list()
values = dict()
for acct in accts:
category.append(acct.name)
type_.append(acct.acct_type)
for entry in acct.history:
if entry.entry_date in values:
values[entry.entry_date].append(entry.value)
else:
dates.append(entry.entry_date.strftime('%b %y'))
values[entry.entry_date] = [entry.value]
acct_df = | pd.DataFrame(values, index=category) | pandas.DataFrame |
from __future__ import absolute_import
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
from keras.utils import np_utils
from nas4candle.candle.common.default_utils import DEFAULT_SEED
from nas4candle.candle.common.default_utils import DEFAULT_DATATYPE
def convert_to_class(y_one_hot, dtype=int):
"""Converts a one-hot class encoding (array with as many positions as total
classes, with 1 in the corresponding class position, 0 in the other positions),
or soft-max class encoding (array with as many positions as total
classes, whose largest valued position is used as class membership)
to an integer class encoding.
Parameters
----------
y_one_hot : numpy array
Input array with one-hot or soft-max class encoding.
dtype : data type
Data type to use for the output numpy array.
(Default: int, integer data is used to represent the
class membership).
Return
----------
Returns a numpy array with an integer class encoding.
"""
maxi = lambda a: a.argmax()
iter_to_na = lambda i: np.fromiter(i, dtype=dtype)
return iter_to_na(maxi(a) for a in y_one_hot)
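# --- Added illustrative sketch; not part of the original nas4candle module ---
# convert_to_class simply takes the argmax of each row, so a strict one-hot row and a
# soft-max row both collapse to the index of their largest entry, e.g.:
def _convert_to_class_example():
    y_one_hot = np.array([[0.0, 1.0, 0.0],   # one-hot row  -> class 1
                          [0.1, 0.2, 0.7]])  # soft-max row -> class 2
    return convert_to_class(y_one_hot)       # expected: array([1, 2])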
def scale_array(mat, scaling=None):
"""Scale data included in numpy array.
Parameters
----------
mat : numpy array
Array to scale
scaling : string
String describing type of scaling to apply.
Options recognized: 'maxabs', 'minmax', 'std'.
'maxabs' : scales data to range [-1 to 1].
'minmax' : scales data to range [0, 1].
'std' : scales data to normal variable with
mean 0 and standard deviation 1.
(Default: None, no scaling).
Return
----------
Returns the numpy array scaled by the method specified.
If no scaling method is specified, it returns the numpy
array unmodified.
"""
if scaling is None or scaling.lower() == 'none':
return mat
# Scaling data
if scaling == 'maxabs':
# Scaling to [-1, 1]
scaler = MaxAbsScaler(copy=False)
elif scaling == 'minmax':
# Scaling to [0,1]
scaler = MinMaxScaler(copy=False)
else:
# Standard normalization
scaler = StandardScaler(copy=False)
return scaler.fit_transform(mat)
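# --- Added illustrative sketch; not part of the original nas4candle module ---
# The three recognised scaling options applied column-wise to a toy matrix:
# 'maxabs' divides by the largest absolute value, 'minmax' maps onto [0, 1] and
# 'std' standardises to zero mean / unit variance. The scalers above are built with
# copy=False, so the caller's array is modified in place; .copy() avoids that here.
def _scale_array_example():
    mat = np.array([[1.0, -2.0], [2.0, 4.0], [4.0, 8.0]])
    return {method: scale_array(mat.copy(), scaling=method)
            for method in ('maxabs', 'minmax', 'std')}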
def impute_and_scale_array(mat, scaling=None):
"""Impute missing values with mean and scale data included in numpy array.
Parameters
----------
mat : numpy array
Array to scale
scaling : string
String describing type of scaling to apply.
Options recognized: 'maxabs', 'minmax', 'std'.
'maxabs' : scales data to range [-1 to 1].
'minmax' : scales data to range [0, 1].
'std' : scales data to normal variable with
mean 0 and standard deviation 1.
(Default: None, no scaling).
Return
----------
Returns the numpy array imputed with the mean value of the
column and scaled by the method specified. If no scaling method is specified,
it returns the imputed numpy array.
"""
imputer = Imputer(strategy='mean', axis=0, copy=False)
imputer.fit_transform(mat)
#mat = imputer.fit_transform(mat)
return scale_array(mat, scaling)
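# --- Added illustrative sketch; not part of the original nas4candle module ---
# impute_and_scale_array first replaces NaNs with the column mean (via the legacy
# sklearn Imputer, relying on copy=False to operate in place) and then delegates to
# scale_array:
def _impute_and_scale_example():
    mat = np.array([[1.0, np.nan], [3.0, 4.0]])
    return impute_and_scale_array(mat, scaling='maxabs')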
def load_X_data(train_file, test_file,
drop_cols=None, n_cols=None, shuffle=False, scaling=None,
dtype=DEFAULT_DATATYPE, seed=DEFAULT_SEED):
Load training and testing unlabeled data from the files specified
and construct corresponding training and testing pandas DataFrames.
Columns to load can be selected or dropped. Order of rows
can be shuffled. Data can be rescaled.
Training and testing partitions (coming from the respective files)
are preserved.
This function assumes that the files contain a header with column names.
Parameters
----------
train_file : filename
Name of the file to load the training data.
test_file : filename
Name of the file to load the testing data.
drop_cols : list
List of column names to drop from the files being loaded.
(Default: None, all the columns are used).
n_cols : integer
Number of columns to load from the files.
(Default: None, all the columns are used).
shuffle : boolean
Boolean flag to indicate row shuffling. If True the rows are
re-ordered, if False the order in which rows are read is
preserved.
(Default: False, no permutation of the loading row order).
scaling : string
String describing type of scaling to apply.
Options recognized: 'maxabs', 'minmax', 'std'.
'maxabs' : scales data to range [-1 to 1].
'minmax' : scales data to range [0, 1].
'std' : scales data to normal variable with
mean 0 and standard deviation 1.
(Default: None, no scaling).
dtype : data type
Data type to use for the output pandas DataFrames.
(Default: DEFAULT_DATATYPE defined in default_utils).
seed : int
Value to initialize or re-seed the generator.
(Default: DEFAULT_SEED defined in default_utils).
Return
----------
X_train : pandas DataFrame
Data for training loaded in a pandas DataFrame and
pre-processed as specified.
X_test : pandas DataFrame
Data for testing loaded in a pandas DataFrame and
pre-processed as specified.
"""
# compensates for the columns to drop if there is a feature subselection
usecols = list(range(n_cols + len(drop_cols))) if n_cols else None
df_train = pd.read_csv(train_file, engine='c', usecols=usecols)
df_test = pd.read_csv(test_file, engine='c', usecols=usecols)
# Drop specified columns
if drop_cols is not None:
for col in drop_cols:
df_train.drop(col, axis=1, inplace=True)
df_test.drop(col, axis=1, inplace=True)
if shuffle:
df_train = df_train.sample(frac=1, random_state=seed)
df_test = df_test.sample(frac=1, random_state=seed)
X_train = df_train.values.astype(dtype)
X_test = df_test.values.astype(dtype)
mat = np.concatenate((X_train, X_test), axis=0)
# Scale data
if scaling is not None:
mat = scale_array(mat, scaling)
X_train = mat[:X_train.shape[0], :]
X_test = mat[X_train.shape[0]:, :]
return X_train, X_test
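# --- Added usage sketch; not part of the original nas4candle module ---
# 'my_train.csv' and 'my_test.csv' are placeholder file names; any pair of CSVs with a
# shared header works. Note that scaling is fit on the concatenation of both partitions
# before they are split apart again.
def _load_X_data_example(train_file='my_train.csv', test_file='my_test.csv'):
    X_train, X_test = load_X_data(train_file, test_file, shuffle=True, scaling='std')
    return X_train.shape, X_test.shape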
def load_X_data2(train_file, test_file,
drop_cols=None, n_cols=None, shuffle=False, scaling=None,
validation_split=0.1, dtype=DEFAULT_DATATYPE, seed=DEFAULT_SEED):
Load training and testing unlabeled data from the files specified.
Further split training data into training and validation partitions,
and construct corresponding training, validation and testing pandas DataFrames.
Columns to load can be selected or dropped. Order of rows
can be shuffled. Data can be rescaled.
Training and testing partitions (coming from the respective files)
are preserved, but training is split into training and validation partitions.
This function assumes that the files contain a header with column names.
Parameters
----------
train_file : filename
Name of the file to load the training data.
test_file : filename
Name of the file to load the testing data.
drop_cols : list
List of column names to drop from the files being loaded.
(Default: None, all the columns are used).
n_cols : integer
Number of columns to load from the files.
(Default: None, all the columns are used).
shuffle : boolean
Boolean flag to indicate row shuffling. If True the rows are
re-ordered, if False the order in which rows are read is
preserved.
(Default: False, no permutation of the loading row order).
scaling : string
String describing type of scaling to apply.
Options recognized: 'maxabs', 'minmax', 'std'.
'maxabs' : scales data to range [-1 to 1].
'minmax' : scales data to range [0, 1].
'std' : scales data to normal variable with
mean 0 and standard deviation 1.
(Default: None, no scaling).
validation_split : float
Fraction of training data to set aside for validation.
(Default: 0.1, ten percent of the training data is
used for the validation partition).
dtype : data type
Data type to use for the output pandas DataFrames.
(Default: DEFAULT_DATATYPE defined in default_utils).
seed : int
Value to initialize or re-seed the generator.
(Default: DEFAULT_SEED defined in default_utils).
Return
----------
X_train : pandas DataFrame
Data for training loaded in a pandas DataFrame and
pre-processed as specified.
X_val : pandas DataFrame
Data for validation loaded in a pandas DataFrame and
pre-processed as specified.
X_test : pandas DataFrame
Data for testing loaded in a pandas DataFrame and
pre-processed as specified.
"""
# compensates for the columns to drop if there is a feature subselection
usecols = list(range(n_cols + len(drop_cols))) if n_cols else None
df_train = pd.read_csv(train_file, engine='c', usecols=usecols)
df_test = pd.read_csv(test_file, engine='c', usecols=usecols)
# Drop specified columns
if drop_cols is not None:
for col in drop_cols:
df_train.drop(col, axis=1, inplace=True)
df_test.drop(col, axis=1, inplace=True)
if shuffle:
df_train = df_train.sample(frac=1, random_state=seed)
df_test = df_test.sample(frac=1, random_state=seed)
X_train = df_train.values.astype(dtype)
X_test = df_test.values.astype(dtype)
mat = np.concatenate((X_train, X_test), axis=0)
# Scale data
if scaling is not None:
mat = scale_array(mat, scaling)
# Separate training in training and validation splits after scaling
sizeTrain = X_train.shape[0]
X_test = mat[sizeTrain:, :]
numVal = int(sizeTrain * validation_split)
X_val = mat[:numVal, :]
X_train = mat[numVal:sizeTrain, :]
return X_train, X_val, X_test
def load_Xy_one_hot_data(train_file, test_file,
class_col=None, drop_cols=None, n_cols=None, shuffle=False, scaling=None,
dtype=DEFAULT_DATATYPE, seed=DEFAULT_SEED):
"""Load training and testing data from the files specified, with a column indicated to use as label.
Construct corresponding training and testing pandas DataFrames,
separated into data (i.e. features) and labels. Labels to output are one-hot encoded (categorical).
Columns to load can be selected or dropped. Order of rows
can be shuffled. Data can be rescaled.
Training and testing partitions (coming from the respective files)
are preserved.
This function assumes that the files contain a header with column names.
Parameters
----------
train_file : filename
Name of the file to load the training data.
test_file : filename
Name of the file to load the testing data.
class_col : integer
Index of the column to use as the label.
(Default: None; a label column must be supplied, otherwise the function fails).
drop_cols : list
List of column names to drop from the files being loaded.
(Default: None, all the columns are used).
n_cols : integer
Number of columns to load from the files.
(Default: None, all the columns are used).
shuffle : boolean
Boolean flag to indicate row shuffling. If True the rows are
re-ordered, if False the order in which rows are read is
preserved.
(Default: False, no permutation of the loading row order).
scaling : string
String describing type of scaling to apply.
Options recognized: 'maxabs', 'minmax', 'std'.
'maxabs' : scales data to range [-1 to 1].
'minmax' : scales data to range [0, 1].
'std' : scales data to normal variable with
mean 0 and standard deviation 1.
(Default: None, no scaling).
dtype : data type
Data type to use for the output pandas DataFrames.
(Default: DEFAULT_DATATYPE defined in default_utils).
seed : int
Value to initialize or re-seed the generator.
(Default: DEFAULT_SEED defined in default_utils).
Return
----------
X_train : pandas DataFrame
Data features for training loaded in a pandas DataFrame and
pre-processed as specified.
y_train : pandas DataFrame
Data labels for training loaded in a pandas DataFrame.
One-hot encoding (categorical) is used.
X_test : pandas DataFrame
Data features for testing loaded in a pandas DataFrame and
pre-processed as specified.
y_test : pandas DataFrame
Data labels for testing loaded in a pandas DataFrame.
One-hot encoding (categorical) is used.
"""
assert class_col is not None
# compensates for the columns to drop if there is a feature subselection
usecols = list(range(n_cols + len(drop_cols))) if n_cols else None
df_train = pd.read_csv(train_file, engine='c', usecols=usecols)
df_test = pd.read_csv(test_file, engine='c', usecols=usecols)
if shuffle:
df_train = df_train.sample(frac=1, random_state=seed)
df_test = df_test.sample(frac=1, random_state=seed)
# Get class
y_train = | pd.get_dummies(df_train[class_col]) | pandas.get_dummies |
# -*- coding: utf-8 -*-
import math
import os
import seaborn as sns
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import wandb
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
def periods_where_pv_is_null(df_inputs:pd.DataFrame):
"""
Compute the time periods where the PV generation is always 0 for the solar track.
:param df_inputs: solar track data.
:return: indices where PV is always 0.
"""
# Determine time periods where PV generation is 0
nb_days = int(df_inputs[df_inputs['ZONE_1'] == 1]['POWER'].shape[0] / 24)
max_zone1 = df_inputs[df_inputs['ZONE_1'] == 1]['POWER'].values.reshape(nb_days, 24).max(axis=0)
max_zone2 = df_inputs[df_inputs['ZONE_2'] == 1]['POWER'].values.reshape(nb_days, 24).max(axis=0)
max_zone3 = df_inputs[df_inputs['ZONE_3'] == 1]['POWER'].values.reshape(nb_days, 24).max(axis=0)
indices1 = np.where(max_zone1 == 0)[0]
indices2 = np.where(max_zone2 == 0)[0]
indices3 = np.where(max_zone3 == 0)[0]
print('zone 1', indices1)
print('zone 2', indices2)
print('zone 3', indices3)
return indices1
def wind_data(path_name: str, random_state: int = 0, test_size:int=2*12*2):
"""
Build the wind power data for the GEFcom IJF_paper case study.
"""
df_wind = pd.read_csv(path_name, parse_dates=True, index_col=0)
ZONES = ['ZONE_' + str(i) for i in range(1, 10 + 1)]
# INPUTS DESCRIPTION
# The predictors included wind forecasts at two heights, 10 and 100 m above ground level, obtained from the European Centre for Medium-range Weather Forecasts (ECMWF).
# These forecasts were for the zonal and meridional wind components (denoted u and v), i.e., projections of the wind vector on the west-east and south-north axes, respectively.
# U10 zonal wind component at 10 m
# V10 meridional wind component at 10 m
# U100 zonal wind component at 100 m
# V100 meridional wind component at 100 m
# ------------------------------------------------------------------------------------------------------------------
# Build derived features
# cf winner GEFcom2014 wind track “Probabilistic gradient boosting machines for GEFCom2014 wind forecasting”
# ------------------------------------------------------------------------------------------------------------------
# the wind speed (ws), wind energy (we), and wind direction (wd) were as follows,
# where u and v are the wind components provided and d is the density, for which we used a constant 1.0
# ws = sqrt[u**2 + v**2]
# we = 0.5 × d × ws**3
# wd = 180/π × arctan(u, v)
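# Added worked example (illustrative only): for u = 3 m/s and v = 4 m/s,
# ws = sqrt(3**2 + 4**2) = 5, we = 0.5 * 1.0 * 5**3 = 62.5 and
# wd = arctan2(3, 4) * 180/pi ~= 36.9 degrees.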
df_wind['ws10'] = np.sqrt(df_wind['U10'].values ** 2 + df_wind['V10'].values ** 2)
df_wind['ws100'] = np.sqrt(df_wind['U100'].values ** 2 + df_wind['V100'].values ** 2)
df_wind['we10'] = 0.5 * 1 * df_wind['ws10'].values ** 3
df_wind['we100'] = 0.5 * 1 * df_wind['ws100'].values ** 3
df_wind['wd10'] = np.arctan2(df_wind['U10'].values, df_wind['V10'].values) * 180 / np.pi
df_wind['wd100'] = np.arctan2(df_wind['U100'].values, df_wind['V100'].values) * 180 / np.pi
features = ['U10', 'V10', 'U100', 'V100', 'ws10', 'ws100', 'we10', 'we100', 'wd10', 'wd100']
data_zone = []
for zone in ZONES:
df_var = df_wind[df_wind[zone] == 1].copy()
nb_days = int(len(df_var) / 24)
zones = [df_var[zone].values.reshape(nb_days, 24)[:, 0].reshape(nb_days, 1) for zone in ZONES]
x = np.concatenate([df_var[col].values.reshape(nb_days, 24) for col in features] + zones, axis=1)
y = df_var['TARGETVAR'].values.reshape(nb_days, 24)
df_y = pd.DataFrame(data=y, index=df_var['TARGETVAR'].asfreq('D').index)
df_x = pd.DataFrame(data=x, index=df_var['TARGETVAR'].asfreq('D').index)
# Decomposition between LS, VS & TEST sets (TRAIN = LS + VS)
df_x_train, df_x_TEST, df_y_train, df_y_TEST = train_test_split(df_x, df_y, test_size=test_size,random_state=random_state, shuffle=True)
df_x_LS, df_x_VS, df_y_LS, df_y_VS = train_test_split(df_x_train, df_y_train, test_size=test_size,random_state=random_state, shuffle=True)
data_zone.append([df_x_LS, df_y_LS, df_x_VS, df_y_VS, df_x_TEST, df_y_TEST])
nb_days_LS = len(df_y_LS)
nb_days_VS = len(df_y_VS)
nb_days_TEST = len(df_y_TEST)
print('#LS %s days #VS %s days # TEST %s days' % (nb_days_LS, nb_days_VS, nb_days_TEST))
return [pd.concat([data_zone[i][j] for i in range(0, 9 + 1)], axis=0, join='inner') for j in range(0, 5 + 1)]
def load_data(path_name: str, random_state: int = 0, test_size:int=2*12*2):
"""
Build the load power data for the GEFcom IJF_paper case study.
"""
df_load = pd.read_csv(path_name, parse_dates=True, index_col=0)
features = ['w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'w8', 'w9', 'w10',
'w11', 'w12', 'w13', 'w14', 'w15', 'w16', 'w17', 'w18', 'w19', 'w20',
'w21', 'w22', 'w23', 'w24', 'w25']
max_load = df_load['LOAD'].max()
nb_days = int(len(df_load) / 24)
x = np.concatenate([df_load[col].values.reshape(nb_days, 24) for col in features], axis=1)
y = df_load['LOAD'].values.reshape(nb_days, 24) / max_load
df_y = pd.DataFrame(data=y, index=df_load['LOAD'].asfreq('D').index)
df_x = pd.DataFrame(data=x, index=df_load['LOAD'].asfreq('D').index)
# Decomposition between LS, VS & TEST sets (TRAIN = LS + VS)
df_x_train, df_x_TEST, df_y_train, df_y_TEST = train_test_split(df_x, df_y, test_size=test_size,
random_state=random_state, shuffle=True)
df_x_LS, df_x_VS, df_y_LS, df_y_VS = train_test_split(df_x_train, df_y_train, test_size=test_size,
random_state=random_state, shuffle=True)
nb_days_LS = len(df_y_LS)
nb_days_VS = len(df_y_VS)
nb_days_TEST = len(df_y_TEST)
print('#LS %s days #VS %s days # TEST %s days' % (nb_days_LS, nb_days_VS, nb_days_TEST))
return df_x_LS, df_y_LS, df_x_VS, df_y_VS, df_x_TEST, df_y_TEST
def build_pv_features(df_var:pd.DataFrame, indices:np.array):
"""
Build features for NFs multi-output.
:param df_var: (n_periods, n_features)
:param indices: index where PV generation is always 0.
# INPUTS DESCRIPTION
# Variable id. Variable name
# 078.128 Total column liquid water (tclw)
# 079.128 Total column ice water (tciw)
# 134.128 Surface pressure (SP)
# 157.128 Relative humidity at 1000 mbar (r)
# 164.128 Total cloud cover (TCC)
# 165.128 10-metre U wind component (10u)
# 166.128 10-metre V wind component (10v)
# 167.128 2-metre temperature (2T)
# 169.128 Surface solar rad down (SSRD)
# 175.128 Surface thermal rad down (STRD)
# 178.128 Top net solar rad (TSR)
# 228.128 Total precipitation (TP)
"""
n_days = int(len(df_var) / 24) # n days
# Reshaping features from (24 * n_days,) to (n_days, 24) then drop time periods where PV is always 0
y = df_var['POWER'].values.reshape(n_days, 24)
y = np.delete(y, indices, axis=1)
tclw = df_var['VAR78'].values.reshape(n_days, 24)
tclw = np.delete(tclw, indices, axis=1)
tciw = df_var['VAR79'].values.reshape(n_days, 24)
tciw = np.delete(tciw, indices, axis=1)
sp = df_var['VAR134'].values.reshape(n_days, 24)
sp = np.delete(sp, indices, axis=1)
rh = df_var['VAR157'].values.reshape(n_days, 24)
rh = np.delete(rh, indices, axis=1)
TCC = df_var['VAR164'].values.reshape(n_days, 24)
TCC = np.delete(TCC, indices, axis=1)
windU = df_var['VAR165'].values.reshape(n_days, 24)
windU = np.delete(windU, indices, axis=1)
windV = df_var['VAR166'].values.reshape(n_days, 24)
windV = np.delete(windV, indices, axis=1)
TT = df_var['VAR167'].values.reshape(n_days, 24)
TT = np.delete(TT, indices, axis=1)
SSRD = df_var['VAR169'].values.reshape(n_days, 24)
SSRD = np.delete(SSRD, indices, axis=1)
STRD = df_var['VAR175'].values.reshape(n_days, 24)
STRD = np.delete(STRD, indices, axis=1)
TSR = df_var['VAR178'].values.reshape(n_days, 24)
TSR = np.delete(TSR, indices, axis=1)
TP = df_var['VAR228'].values.reshape(n_days, 24)
TP = np.delete(TP, indices, axis=1)
zone1 = df_var['ZONE_1'].values.reshape(n_days, 24)[:, 0].reshape(n_days, 1)
zone2 = df_var['ZONE_2'].values.reshape(n_days, 24)[:, 0].reshape(n_days, 1)
zone3 = df_var['ZONE_3'].values.reshape(n_days, 24)[:, 0].reshape(n_days, 1)
x = np.concatenate([TT, SSRD, np.multiply(SSRD, SSRD), np.multiply(SSRD, TT), rh, zone1, zone2, zone3], axis=1)
return x,y
def pv_data(path_name: str, test_size:int, random_state:int=0):
"""
Build the PV data for the GEFcom IJF_paper case study.
"""
df_pv = pd.read_csv(path_name, parse_dates=True, index_col=0)
ZONES = ['ZONE_1', 'ZONE_2', 'ZONE_3']
indices = periods_where_pv_is_null(df_inputs=df_pv)
data_zone = []
for zone in ZONES:
df_var = df_pv[df_pv[zone] == 1].copy()
d_index = df_var['POWER'].asfreq('D').index
x, y = build_pv_features(df_var=df_var, indices=indices)
df_y = pd.DataFrame(data=y, index=d_index)
df_x = pd.DataFrame(data=x, index=d_index)
# Decomposition between LS, VS & TEST sets (TRAIN = LS + VS)
df_x_train, df_x_TEST, df_y_train, df_y_TEST = train_test_split(df_x, df_y, test_size=test_size, random_state=random_state, shuffle=True)
df_x_LS, df_x_VS, df_y_LS, df_y_VS = train_test_split(df_x_train, df_y_train, test_size=test_size, random_state=random_state, shuffle=True)
data_zone.append([df_x_LS, df_y_LS, df_x_VS, df_y_VS, df_x_TEST, df_y_TEST])
nb_days_LS = len(df_y_LS)
nb_days_VS = len(df_y_VS)
nb_days_TEST = len(df_y_TEST)
print('%s #LS %s days #VS %s days # TEST %s days' % (zone, nb_days_LS, nb_days_VS, nb_days_TEST))
return [ | pd.concat([data_zone[i][j] for i in [0, 1, 2]], axis=0, join='inner') | pandas.concat |
import glob
import json
import logging
import os.path
import re
from datetime import datetime
from os import mkdir
from os.path import exists, isfile, join
import pandas as pd
from bsbetl import calc_helpers, g, helpers
def save_runtime_config():
''' call this after runtime values need to be persisted '''
with open(g.CONFIG_RUNTIME_FQ, 'w') as f:
json.dump(g.CONFIG_RUNTIME, f, indent=4)
def save_and_reload_page_size(page_size_setting :str, page_size_wanted :int) ->int:
''' save a page size setting to runtime config '''
print(f"get_save_page_size(page_size_setting='{page_size_setting}',page_size_wanted={page_size_wanted}) ...")
# allow user to decide his own page size. 0 means sensible defaults
if isinstance(page_size_wanted, int) and page_size_wanted > 0:
# save the new page size
g.CONFIG_RUNTIME[page_size_setting] = page_size_wanted
save_runtime_config()
# reload from config
page_size = g.CONFIG_RUNTIME[page_size_setting]
return page_size
# def get_last_trade_timestamp(hint_filename_fq) -> str:
# """ extracts the 'Last trade' timestamp from a hint file """
# if not os.path.exists(hint_filename_fq):
# return ''
# f = open(hint_filename_fq, 'r')
# # read contents eg '1581 trades written. Last trade: 2020-09-21 17:36:25,9.15,0,2403'
# hint_contents = f.read()
# f.close()
# # this should return eg '2020-09-21 17:36:25' from the above example
# match_obj = re.search(
# r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", hint_contents)
# if match_obj is not None:
# return match_obj.group()
# else:
# return ""
def tail(file, n=1, bs=1024):
""" Read Last n Lines of file
credit:
https://www.roytuts.com/read-last-n-lines-from-file-using-python/
https://github.com/roytuts/python/blob/master/read-lines-from-last/last_lines_file.py
"""
f = open(file)
f.seek(0, 2)
l = 1-f.read(1).count('\n')
B = f.tell()
while n >= l and B > 0:
block = min(bs, B)
B -= block
f.seek(B, 0)
l += f.read(block).count('\n')
f.seek(B, 0)
l = min(l, n)
lines = f.readlines()[-l:]
f.close()
return lines
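# --- Added usage sketch; not part of the original bsbetl module ---
# tail() seeks backwards in bs-byte blocks until it has seen n newlines, so only the
# end of a potentially large file is read. 'some_trades.csv' is a placeholder path.
def _tail_example(path='some_trades.csv', n=3):
    return tail(path, n=n)  # list of the last n lines, trailing newlines included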
def get_first_trade_timestamp(trades_csv_fq: str) -> str:
if not os.path.exists(trades_csv_fq):
return ''
with open(trades_csv_fq, 'r') as f:
first_line = f.readline()
if first_line.startswith('date_time,'):
first_line = f.readline()
match_obj = re.search(
r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", first_line)
if match_obj is not None:
return match_obj.group()
else:
return ""
def get_last_trades_timestamp(trades_csv_fq: str) -> str:
if not os.path.exists(trades_csv_fq):
return ''
last_line = ''
end_bit = tail(trades_csv_fq, n=1)
if len(end_bit) > 0:
last_line = end_bit[0]
else:
return ''
# last line could be '' or
# date_time,price,volume,vol_cum
# or
# 2020-01-10 14:49:50,13,160,160
match_obj = re.search(
r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", last_line)
if match_obj is not None:
return match_obj.group()
else:
return ""
def check_monotonicity(trades_csv_fq: str) -> tuple:
''' returns line number where monotonicity fails, else 0 if all good '''
monotonic = True
line_num = 0
last_stamp = '0000-00-00 00:00:00'
logging.info(f'monotonicity checking {trades_csv_fq} ...')
with open(trades_csv_fq, 'r') as f:
for line in f:
line_num = line_num+1
if len(line) > 0 and (not line.startswith('date_time')):
# make sure its a valid timestamp
match_obj = re.search(
r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", line)
# test
if (match_obj is not None) and (match_obj.group() < last_stamp):
# break in monotonicity
monotonic = False
break
elif match_obj is not None:
last_stamp = match_obj.group()
if monotonic:
return (0, '')
else:
return (line_num, last_stamp)
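# --- Added usage sketch; not part of the original bsbetl module ---
# A result of (0, '') means every timestamp in the trades CSV is non-decreasing;
# otherwise the offending line number and the last good timestamp are returned.
# The default path below is only a placeholder for a real per-share CSV.
def _check_monotonicity_example(trades_csv_fq='120470.ETR.CSV'):
    bad_line, last_stamp = check_monotonicity(trades_csv_fq)
    return bad_line == 0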
def customize_plot_grid(ax):
# Don't allow the axis to be on top of your data
ax.set_axisbelow(True)
# Turn on the minor TICKS, which are required for the minor GRID
ax.minorticks_on()
# Customize the major grid
ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
# Customize the minor grid
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
return
def prepare_csv_outfile_paths(sharelist_tuples: list, csv_by_share_path: str) -> tuple:
""" Ensure By_Day folders exist for each share
Also return a tuple consisting of
(list of out_sharefile_paths, share_num keyed dictionary of last written timestamp)
"""
out_sharefile_paths = []
out_sharefile_last_tstamp = {}
for share_name, share_num in sharelist_tuples:
share_num_without_bourse = share_num[0:-4] # used as a key below
if calc_helpers.check_share_num(share_num):
# (csv_by_share_path assumed to exist)
single_share_directory = f'{csv_by_share_path}\{share_num}'
if not exists(single_share_directory):
mkdir(single_share_directory)
logging.warn(
f'folder {single_share_directory} had to be created')
# the name of the CSV file we'll be creating or appending to
out_sharefile_fq_name = f'{csv_by_share_path}\{share_num}\{share_num}.CSV'
# gather list of output filenames for use below
out_sharefile_paths.append(out_sharefile_fq_name)
# also obtain timestamp of the last record currently in the outfile
# index it without the bourse suffix
out_sharefile_last_tstamp[share_num_without_bourse] = get_last_trades_timestamp(out_sharefile_fq_name)
else:
logging.warn(
f'share_number {share_num} has incorrect format. missing or incorrect bourse suffix?')
return (out_sharefile_paths, out_sharefile_last_tstamp)
def prepare_src_csv_files_list(csv_by_day_path: str, start_date: str) -> list:
# prepare src_csv_files_list
init_src_csv_files_list = [f for f in os.listdir(csv_by_day_path) if f.endswith(
'TXT.CSV') and isfile(join(csv_by_day_path, f))]
# discard src_csv files whose date predates start_date
src_csv_files_list = [
f for f in init_src_csv_files_list if f >= f'{start_date}.TXT.CSV']
return src_csv_files_list
def build_share2handleindex_dict(file_handles: list) -> dict:
""" given list of open file handles, create a dictionary relating share_num part to index within file_handles list"""
dict_out = {}
for i, fh in enumerate(file_handles):
name = fh.name # ='C:\BsbEtl\OUT\By_Share\120470.ETR\120470.ETR.CSV'
share_part = name[-14:-8] # 120470 # NOTE bourse part .ETR is stripped as the key
dict_out[share_part] = i
return dict_out
def write_share_hint_file(ctx, share_name: str, share_num: str, start_date: str):
""" writes a text file to the share folder whose name indicates the full name of the share and whose content tels the user the trading range """
csv_by_share_path = helpers.etl_path(ctx, g.CSV_BY_SHARE_FOLDER)
out_sharefile_fq_name = f'{csv_by_share_path}\{share_num}\{share_num}.CSV'
# peek into it (if existing) and retrieve the datetime stamp of the first trade
first_trade_time_stamp = get_first_trade_timestamp(
out_sharefile_fq_name)
# this stamp needs to be cleaned up since it will form part of the info filename
# replace non word chars with underscores
first_trade_time_stamp = re.sub(r"[\W]", "_", first_trade_time_stamp)
#last_trade_time_stamp = get_last_trade_timestamp(out_sharefile_fq_name) # this should return eg 2020-09-21 17:36:25 (or '')
single_share_directory = f'{csv_by_share_path}\{share_num}'
# select the appropriate slug
hint_slug = start_date if first_trade_time_stamp == '' else first_trade_time_stamp
# put down a hint file whose name informs user of the start date of trades
scrubbed_share_name = re.sub(r"[\W]", "_", share_name)
hint_filename_fq = single_share_directory + \
f"\\_{hint_slug}_{scrubbed_share_name}.info"
# remove former .info file
for fl in glob.glob(f'{single_share_directory}\\*.info'):
os.remove(fl)
# write the hint file
open(hint_filename_fq, 'a').close()
def update_share_hint_files(ctx, sharelist_tuples: list, start_date: str):
""" loop thru all share folders depositing updated share hint files """
for share_name, share_number in sharelist_tuples:
write_share_hint_file(ctx, share_name, share_number, start_date)
def get_last_stored_df(share_num: str, stage: int = 1) -> pd.DataFrame:
""" extract share dataframe from stage appropriate HDFStore and return it """
data_store = pd.HDFStore(g.SHARE_STORE_FQ.format(stage))
try:
assert len(share_num) == 10, f'share_num {share_num} malformed'
share_key = share_num[-3:] + '_' + share_num[0:-4]
# extract dataframe
df = data_store[share_key]
return df
except AssertionError as ae:
logging.error(f'Assertion Error {ae}')
return None
except KeyError as ke:
# print(f'Key Error {ke}')
logging.error(f'Key Error {ke}')
return None
finally:
data_store.close()
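# Added note (illustrative): for share_num '120470.ETR' (10 characters) the HDFStore key
# built above is 'ETR_120470' -- bourse suffix first, underscore, then the numeric part.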
def add_df_columns(dest_df :pd.DataFrame, src_df :pd.DataFrame, columns_to_add :list):
for col in columns_to_add:
if not col in dest_df.columns:
dest_df.loc[:,col] = src_df[col]
def load_overview_df(sharelist,stage):
''' load and return an overview '''
try:
ov_fn = g.OVERVIEW_STORE_FQ.format(stage)
ov_key=g.HDFSTORE_OV_KEY.format(sharelist,stage)
ov_data_store = | pd.HDFStore(ov_fn) | pandas.HDFStore |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
from vectorbt import defaults
from vectorbt.records.drawdowns import Drawdowns
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
index = pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
])
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=index)
ret = ts.pct_change()
defaults.returns['year_freq'] = '252 days' # same as empyrical
factor_returns = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.freq == day_dt
assert ret['a'].vbt.returns.freq == day_dt
assert ret.vbt.returns(freq='2D').freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).freq == day_dt * 4
def test_year_freq(self):
assert ret.vbt.returns.year_freq == pd.to_timedelta(defaults.returns['year_freq'])
assert ret['a'].vbt.returns.year_freq == pd.to_timedelta(defaults.returns['year_freq'])
assert ret['a'].vbt.returns(year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert ret.vbt.returns(year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a'])._obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts)._obj, ts.pct_change())
assert | pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days') | pandas.Series.vbt.returns.from_price |
"""Geographical extracts of natural increase, nom and nim
"""
from pathlib import Path
import pandas as pd
import data
import file_paths
from data import read_abs_data, read_abs_meta_data
DATA_ABS_PATH = Path.home() / "Documents/Analysis/Australian economy/Data/ABS"
def read_3101():
series_id = data.series_id_3101()
return data.read_abs_data(series_id=series_id)
def nom(df=None):
"""Exract NOM data
Parameters
----------
df : [type], optional
[description], by default None
"""
if df is None:
df = read_3101()
return df.net_overseas_migration
def nom_year_ending(df_nom=None):
"""Return year ending nom
Parameters
----------
df_nom : pandas.DataFrame, optional
ABS 3101 dataframe; read via read_3101() when None
"""
if df_nom is None:
df_nom = read_3101()
return df_nom.net_overseas_migration.rolling(4).sum().dropna()
def nom_year_ending_annual(df_nom=None, quarter="A-Jun"):
"""Return year ending for a given quarter
Parameters
----------
df_nom : Pandas series, optional
contains nom in sub-annual data
"""
if df_nom is None:
df_nom = nom()
# check there are 4 quarters that match the periodicity of "quarter"
# find the first quarter that matches the ending quarter, and drop elements up to it so the series starts at the following quarter
for i, date_ in enumerate(df_nom.index[:4]):
if date_.strftime("%b") == quarter[-3:]:
idx = i + 1
df_nom = df_nom.iloc[idx:]
break
if df_nom.index[3].strftime("%b") != quarter[-3:]:
print("1st DATE VALUE IS NOT A FULL YEAR")
nom_annual = df_nom.resample(quarter).sum()
# remove last year if not full year (ie nom last period == quarter parameter)
if df_nom.index[-1].strftime("%b") != quarter[-3:]:
nom_annual = nom_annual.iloc[:-1]
return nom_annual
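# Illustrative usage (assumes the ABS 3101 series is available locally):
# nom_june = nom_year_ending_annual(quarter="A-Jun") # year-ending NOM summed to June quarters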
def component_shares_between_dates(df):
"""
Calculate the nom and natural contribution to population growth over the period covered
by the dataframe.
Parameters
----------
df: a dataframe of ABS 3101, with column names already cleaned
(ie lower cased, and joined with "_")
Returns:
None but prints out a summary of population increase and component contributions
"""
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError("Chris - the dataframe does not have a time series index")
idx_erp_start = df.first_valid_index()
# Sum of components must start from 2nd period - components in first period
# contribute to the start ERP only
idx_component_start = df.iloc[1:].first_valid_index()
idx_erp_end = df.last_valid_index()
pop_delta = (
df.loc[idx_erp_end].estimated_resident_population
- df.loc[idx_erp_start].estimated_resident_population
)
pop_delta_pct_increase = (
pop_delta / df.loc[idx_erp_start].estimated_resident_population
)
nom = df.loc[idx_component_start:].net_overseas_migration.sum()
natural_increase = df.loc[idx_component_start:].natural_increase.sum()
components = nom + natural_increase
nom_share = nom / components
natural_increase_share = natural_increase / components
print(f"Between {idx_erp_start:%Y-%m-%d} and {idx_erp_end:%Y-%m-%d}:\n")
print(
f"Population increased {pop_delta * 1000:,.0f} ({pop_deta_pct_increase:.1%}) people.\n"
)
print(
f"{nom_share:.1%} from NOM, {natural_increase_share:.1%} from natural increase."
)
return
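# Illustrative usage: summarise component contributions over the full ERP history
# component_shares_between_dates(read_3101())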
def annual_population_components(df=None, month=6):
"""
TODO: read in 3101 rather than passing in as df
Calculate annual nom and natural increase components over the period covered by a 3101 dataframe.
Parameters
----------
df: a dataframe of ABS 3101, with column names already cleaned
(ie lower cased, and joined with "_")
Returns:
a dataframe
"""
if df is None:
df = read_3101()
ERP = df[df.index.month == month].estimated_resident_population
ERP_flow = ERP.diff()
ERP_flow.name = "ERP_flow"
NOM = df.net_overseas_migration.rolling(4).sum()
NOM = NOM[NOM.index.month == month]
natural = df.natural_increase.rolling(4).sum()
natural = natural[natural.index.month == month]
population = pd.concat([ERP, ERP_flow, natural, NOM], axis=1)
## Adjust nom for period 1996 through 2005
# population.loc["1996":"2005", "net_overseas_migration"] = population.loc["1996":"2005", "net_overseas_migration"] * 1.25
population = population.assign(
NI_and_NOM=lambda x: x[["natural_increase", "net_overseas_migration"]].sum(
axis=1
)
)
# adjust NOM and natural increase to be correct levels of ERP - apportion intercensal equally
nom_intercensal_NOM_share = (
population.net_overseas_migration / population.NI_and_NOM
)
population = population.assign(
nom_adj=lambda x: nom_intercensal_NOM_share * x.ERP_flow
).assign(
natural_increase_adj=lambda x: (1 - nom_intercensal_NOM_share) * x.ERP_flow
)
return population
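# Illustrative usage: June-year components, with NOM and natural increase rescaled so they
# sum to the ERP flow (the intercensal difference is apportioned by each component's share)
# pop = annual_population_components(month=6)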
def get_pop_by_age(region=None, gender=None):
filepath = file_paths.abs_data_folder / "3101 age by year by gender.parquet"
df = | pd.read_parquet(filepath) | pandas.read_parquet |
""" this is a mixture of the best #free twitter sentimentanalysis modules on github.
i took the most usable codes and mixed them into one because all of them
where for a linguistical search not usable and did not show a retweet or a full tweet
no output as csv, only few informations of a tweet, switching language
or even to compare linguistic features in tweets of two different langauges and etc. etc ...
special and many many thanks to https://github.com/vprusso/youtube_tutorials who showed on his
page a tutorial on how to do a sentimentanalysis with python
i did this for users with not much skills and linguistical background to help them to get a corpus of twitterdata
and to show them how to do a comparison between sentence based vs document based sentimentanalysis
credits to all AVAILABLE FREE AND SIMPLE sentimentanalysis programms (dec. 2019) on github.
many thanks to everybody and of course to github for making this exchange and usage possible!
cemre koc (Goethe University, Frankfurt) Python3.7
"""
from textblob import TextBlob #sentiment lexicon; FOR GERMAN use TEXTBLOB_DE (import textblob_de)
import re #module for regular expressions
from tweepy import API #Twitter API module; for more info see the tweepy docs
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import tweepy #used by different features of this program
import sys #only if wanted
import csv ##only if wanted (see rest of program)
import pandas as pd #pandas for illustration
import authentification #access to twitter
import numpy as np #collection of tweets via numpy
import matplotlib.pyplot as plt #if needed (see below for ploting)
import numpy
#output screen (if you use pycharm for full screen view)
#only if needed
pd.set_option('display.max_rows', 1000000000000)
pd.set_option('display.max_columns', 1000000000)
pd.set_option('display.width', 100000000000)
pd.set_option('display.float_format', '{:20,.2f}'.format)
#for maximal OUTPUT!
#pd.set_option('display.max_colwidth', -1)
#TWITTER AUTHENTICATION (Twitter developer)
#please fill authentification.py with your credentials!
#you need a Twitter developer account to obtain this information
class TwitterAuthenticator():
def authenticate_twitter_app(self):
auth = OAuthHandler(authentification.CONSUMER_KEY, authentification.CONSUMER_SECRET)
auth.set_access_token(authentification.ACCESS_TOKEN, authentification.ACCESS_TOKEN_SECRET)
return auth
#TWITTER CLIENT SERVER
class TwitterClient():
def __init__(self, twitter_user=None):
self.auth = TwitterAuthenticator().authenticate_twitter_app()
self.twitter_client = API(self.auth)
self.twitter_user = twitter_user
def get_twitter_client_api(self):
return self.twitter_client
def get_user_timeline_tweets(self, num_tweets):
tweets = []
for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):
tweets.append(tweet)
return tweets
def get_friend_list(self, num_friends):
friend_list = []
for friend in Cursor(self.twitter_client.friends, id=self.twitter_user).items(num_friends):
friend_list.append(friend)
return friend_list
def get_home_timeline_tweets(self, num_tweets):
home_timeline_tweets = []
for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):
home_timeline_tweets.append(tweet)
return home_timeline_tweets
#TWITTER STREAMER FOR STREAMING AND LIVE TWEETS
class TwitterStreamer():
def __init__(self):
self.twitter_autenticator = TwitterAuthenticator()
def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
# AUTHENTICATION AND CONNECTION TO API
listener = TwitterListener(fetched_tweets_filename)
auth = self.twitter_autenticator.authenticate_twitter_app()
stream = Stream(auth, listener)
#you can use stream.filter to define the search for words/hashtags
#the same sentiment analysis works for words or hashtags
stream.filter(track=hash_tag_list)
#TWITTER STREAM LISTENER FOR PRINTING TWEETS
class TwitterListener(StreamListener):
def __init__(self, fetched_tweets_filename):
self.fetched_tweets_filename = fetched_tweets_filename
def on_data(self, data):
try:
print(data)
with open(self.fetched_tweets_filename, 'a') as tf:
tf.write(data)
return True
except BaseException as e:
print("Error on_data %s" % str(e))
return True
def on_error(self, status):
if status == 420:
#OCCURS IF RATE LIMIT IS PASSED
return False
print(status)
#FOR ANALYZING AND CLEANING TWEET CONTENT
class TweetAnalyzer():
#DELETE ALL UNNECESSARY CHARACTERS
def clean_tweet(self, tweet):
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
#SIMPLE SENTIMENT ANALYSIS VIA TEXTBLOB (English)
def analyze_sentiment(self, tweet):
analysis = TextBlob(self.clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
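# Illustrative example: a clearly positive phrase usually has TextBlob polarity > 0, so
# analyze_sentiment returns 1; a clearly negative phrase returns -1; neutral text returns 0.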
#You can use the following classification of polarity for sentence-based analysis;
#since I use this program for document-level classification, I kept it at 1, -1 and 0
# if (polarity == 0):
# print("Neutral")
# elif (polarity > 0 and polarity <= 0.3):
# print("Schwach positiv")
# elif (polarity > 0.3 and polarity <= 0.6):
# print("positiv")
# elif (polarity > 0.6 and polarity <= 1.0):
# print("Stark positiv")
# elif (polarity > -0.3 and polarity <= 0):
# print("schwach negativ")
# elif (polarity > -0.6 and polarity <= -0.3):
# print("Negativ")
# elif (polarity >= -1.0 and polarity <= -0.6):
# print("Stark negativ")
def tweets_to_data_frame(self, tweets):
df = | pd.DataFrame(data=[tweet.full_text for tweet in tweets], columns=['tweets']) | pandas.DataFrame |
from datetime import datetime
import pandas as pd
import pytest
from dask import dataframe as dd
import featuretools as ft
from featuretools import Relationship
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import import_or_none
ks = import_or_none('databricks.koalas')
@pytest.fixture
def values_es(es):
es.normalize_entity('log', 'values', 'value',
make_time_index=True,
new_entity_time_index="value_time")
return es
@pytest.fixture
def true_values_lti():
true_values_lti = pd.Series([datetime(2011, 4, 10, 10, 41, 0),
datetime(2011, 4, 9, 10, 31, 9),
datetime(2011, 4, 9, 10, 31, 18),
datetime(2011, 4, 9, 10, 31, 27),
datetime(2011, 4, 10, 10, 40, 1),
datetime(2011, 4, 10, 10, 41, 3),
datetime(2011, 4, 9, 10, 30, 12),
datetime(2011, 4, 10, 10, 41, 6),
datetime(2011, 4, 9, 10, 30, 18),
datetime(2011, 4, 9, 10, 30, 24),
datetime(2011, 4, 10, 11, 10, 3)])
return true_values_lti
@pytest.fixture
def true_sessions_lti():
sessions_lti = pd.Series([datetime(2011, 4, 9, 10, 30, 24),
datetime(2011, 4, 9, 10, 31, 27),
datetime(2011, 4, 9, 10, 40, 0),
datetime(2011, 4, 10, 10, 40, 1),
datetime(2011, 4, 10, 10, 41, 6),
datetime(2011, 4, 10, 11, 10, 3)])
return sessions_lti
@pytest.fixture
def wishlist_df():
wishlist_df = pd.DataFrame({
"session_id": [0, 1, 2, 2, 3, 4, 5],
"datetime": [datetime(2011, 4, 9, 10, 30, 15),
datetime(2011, 4, 9, 10, 31, 30),
datetime(2011, 4, 9, 10, 30, 30),
datetime(2011, 4, 9, 10, 35, 30),
datetime(2011, 4, 10, 10, 41, 0),
datetime(2011, 4, 10, 10, 39, 59),
datetime(2011, 4, 10, 11, 10, 2)],
"product_id": ['coke zero', 'taco clock', 'coke zero', 'car',
'toothpaste', 'brown bag', 'coke zero'],
})
return wishlist_df
@pytest.fixture
def extra_session_df(es):
row_values = {'customer_id': 2,
'device_name': 'PC',
'device_type': 0,
'id': 6}
row = pd.DataFrame(row_values, index=pd.Index([6], name='id'))
df = to_pandas(es['sessions'].df)
df = df.append(row, sort=True).sort_index()
if isinstance(es['sessions'].df, dd.DataFrame):
df = dd.from_pandas(df, npartitions=3)
if ks and isinstance(es['sessions'].df, ks.DataFrame):
df = ks.from_pandas(df)
return df
class TestLastTimeIndex(object):
def test_leaf(self, es):
es.add_last_time_indexes()
log = es['log']
assert len(log.last_time_index) == 17
log_df = to_pandas(log.df)
log_lti = to_pandas(log.last_time_index)
for v1, v2 in zip(log_lti, log_df['datetime']):
assert (pd.isnull(v1) and | pd.isnull(v2) | pandas.isnull |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 15:52:01 2020
modeling operation.
requirements = [
'matplotlib>=3.1.3',
'sklearn>=0.22.1',
'seaborn>=0.10.0',
'factor_analyzer>=0.3.2',
'joblib>=0.14.1',
]
@author: zoharslong
"""
from numpy import max as np_max, min as np_min, ravel as np_ravel, argsort as np_argsort, abs as np_abs
from numpy import array as np_array, int32 as np_int32, int64 as np_int64, float64 as np_float64
from pandas import DataFrame, concat as pd_concat
from re import findall as re_find
from random import sample as rnd_smp
from os.path import join as os_join
from pyzohar.sub_slt_bsc.bsz import lsz
from pyzohar.sub_slt_bsc.dfz import dfz
import seaborn as sns # plot on factor analysis
from factor_analyzer import FactorAnalyzer, calculate_kmo, calculate_bartlett_sphericity, Rotator
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.svm import SVC, SVR
from sklearn.svm._classes import SVC as typ_svc, SVR as typ_svr
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from joblib import dump as jb_dump, load as jb_load
class mdz_nit(dfz):
"""
modeling class initiation
>>> mdl = mdz_nit(DataFrame(),dfz_mpt=dfz(),dct_mdl={'_id':'','fld':'','clm_x':[],'clm_y':[],'drp':None,'kep':None})
>>> mdl.mdl_nit()
"""
def __init__(self, dts=None, lcn=None, *, spr=False, dfz_mpt=None, dct_mdl=None):
"""
more param of self is available, in [clm_x, clm_y, _x, _y, mdl]
:param dts: self.dts, premier than dfz_mpt
:param lcn: self.lcn, premier than dfz_mpt
:param spr: let self = self.dts
:param dfz_mpt: a dfz for inserted
:param dct_mdl: a dict of params, keys in ['_id', 'fld', 'clm_y', 'clm_x', 'drp', 'kep']
:param dct_mdl: more params are auto generated, in ['fls', 'smp_raw/_y_0/_y_1/_trn/_tst']
"""
if dfz_mpt is not None:
dts = dts if dts is not None else dfz_mpt.dts.copy()
lcn = lcn if lcn is not None else dfz_mpt.lcn.copy()
super(mdz_nit, self).__init__(dts, lcn, spr=spr)
self.clm_x, self.clm_y, self._x, self._y, self.mdl = None, None, None, None, None
self.mdl_nit(dct_mdl=dct_mdl)
def mdl_nit(self, dct_mdl=None, rst=True):
"""
model-specific initialization; refreshes the params under self.lcn['mdl'] from self.dts, and must be called whenever self.dts changes
:param dct_mdl: a dict of params, keys in ['_id', 'fld', 'clm_y', 'clm_x', 'drp', 'kep']
:param rst: reset self.lcn.mdl or not, default True
:return: None
"""
if dct_mdl is not None:
mch_tmp = [self.lcn['mdl']['_id'], self.lcn['mdl']['clm_y'], self.lcn['mdl']['clm_x']] if \
'mdl' in self.lcn.keys() else None
dct_mdl['clm_x'] = lsz(dct_mdl['clm_x']).typ_to_lst(rtn=True)
dct_mdl['clm_y'] = lsz(dct_mdl['clm_y']).typ_to_lst(rtn=True)
self.lcn.update({'mdl': dct_mdl})
self.clm_x = self.lcn['mdl']['clm_x']
self.clm_y = self.lcn['mdl']['clm_y']
self.lcn['mdl']['fls'] = 'mdl_' + self.lcn['mdl']['_id'] + '_' + self.lcn['mdl']['clm_y'][0] + '.pkl'
if mch_tmp != [self.lcn['mdl']['_id'], self.lcn['mdl']['clm_y'], self.lcn['mdl']['clm_x']]:
self.mdl, self._x, self._y = None, None, None # drop any existing self.mdl when the params change substantially
if rst:
self.mdl_smp_trc()
def mdl_smp_trc(self):
"""
basic sample trace
"""
tmp = dfz(self.dts.copy(), self.lcn.copy())
if tmp.len > 0:
# filter the dataset by the drop/keep label conditions
if self.lcn['mdl']['drp']:
tmp.drp_dcm_ctt(self.lcn['mdl']['drp'], prm='drop', ndx_rst=False)
if self.lcn['mdl']['kep']:
tmp.drp_dcm_ctt(self.lcn['mdl']['kep'], prm='keep', ndx_rst=False)
# generate sample parameters
self.lcn['mdl']['smp_raw'] = {'ndx': tmp.dts.index.tolist(), 'clm_x': self.clm_x, 'clm_y': self.clm_y}
tmp.drp_dcm_ctt({self.clm_y[0]: 1}, prm='keep', ndx_rst=False)
self.lcn['mdl']['smp_y_1'] = {'ndx': tmp.dts.index.tolist(), 'clm_x': self.clm_x, 'clm_y': self.clm_y}
dff_ndx = lsz().mrg('differ', self.lcn['mdl']['smp_raw']['ndx'], self.lcn['mdl']['smp_y_1']['ndx'],
rtn=True)
self.lcn['mdl']['smp_y_0'] = {'ndx': dff_ndx, 'clm_x': self.clm_x, 'clm_y': self.clm_y}
self._dl_smp_blc() # generate smp_trn, smp_tst
def _dl_smp_blc(self):
"""
train/test sample balancing; the strategy uses the smaller label class as the baseline to keep labels balanced, with the test set taking about 10%
>>> # an alternative, ready-made way of splitting the training set
>>> x_trn, x_tst, y_trn, y_tst = train_test_split(self._x, self._y, test_size=0.2, random_state=2)
"""
smp_min = min(len(self.lcn['mdl']['smp_y_1']['ndx']), len(self.lcn['mdl']['smp_y_0']['ndx']))
ndx_y_0 = rnd_smp(self.lcn['mdl']['smp_y_0']['ndx'], int(smp_min * 0.9))
ndx_y_1 = rnd_smp(self.lcn['mdl']['smp_y_1']['ndx'], int(smp_min * 0.9))
self.lcn['mdl']['smp_trn'] = {'ndx': ndx_y_0 + ndx_y_1, 'clm_x': self.clm_x, 'clm_y': self.clm_y}
ndx_t_0 = lsz().mrg('differ', self.lcn['mdl']['smp_y_0']['ndx'], ndx_y_0, rtn=True)
ndx_t_1 = lsz().mrg('differ', self.lcn['mdl']['smp_y_1']['ndx'], ndx_y_1, rtn=True)
if len(self.lcn['mdl']['smp_y_1']['ndx']) < len(self.lcn['mdl']['smp_y_0']['ndx']):
ndx_t_0 = rnd_smp(ndx_t_0, int(smp_min * 0.1))
else:
ndx_t_1 = rnd_smp(ndx_t_1, int(smp_min * 0.1))
self.lcn['mdl']['smp_tst'] = {'ndx': ndx_t_0 + ndx_t_1, 'clm_x': self.clm_x, 'clm_y': self.clm_y}
def mdl_smp_mpt(self, tgt=None, *, rtn=False):
"""
model sample from param to real datasets, saved in self._x and self._y
:param tgt: in [None, 'smp_raw', 'smp_trn', 'smp_tst']
:param rtn: if return (dtf_y, dtf_x) or not, default False; if return, self._x/_y will not change
:return: if rtn is True, return (dtf_y, dtf_x)
"""
tmp, y, x = dfz(self.dts, self.lcn), None, None
if tmp.len > 0:
if tgt is None: # no sample set specified: default to self.dts
y, x = tmp.dts[self.clm_y], tmp.dts[self.clm_x]
else: # otherwise select the sample set according to its stored parameters
tmp.drp_dcm(self.lcn['mdl'][tgt]['ndx'], prm='keep', ndx_rst=False)
y, x = tmp.dts[self.lcn['mdl'][tgt]['clm_y']], tmp.dts[self.lcn['mdl'][tgt]['clm_x']]
if rtn:
return y, x
self._y, self._x = y, x
def mdl_sav(self, *, fld=None, fls=None):
"""
save model into .pkl file
:param fld: default None, get fold location from self.lcn
:param fls: default None, get file location from self.lcn
:return: None
"""
fld = self.lcn['mdl']['fld'] if fld is None else fld
fls = self.lcn['mdl']['fls'] if fls is None else fls
jb_dump(self.mdl, os_join(fld, fls))
def mdl_lod(self, *, fld=None, fls=None):
"""
import model from .pkl file
:param fld: default None, get fold location from self.lcn
:param fls: default None, get file location from self.lcn
:return: None
"""
fld = self.lcn['mdl']['fld'] if fld is None else fld
fls = self.lcn['mdl']['fls'] if fls is None else fls
self.mdl = jb_load(os_join(fld, fls))
str_tmp = self.lcn['mdl']['_id']
self.lcn['mdl']['_id'] = re_find('mdl_(.*?)_' + self.lcn['mdl']['clm_y'][0], self.lcn['mdl']['fls'])[0]
if str_tmp != self.lcn['mdl']['_id']:
print('info: _id switch from %s to %s' % (str_tmp, self.lcn['mdl']['_id']))
def drp_dcm_for_bly(self, flt_smp=2):
"""
drop documents to balance the target label. Balances sample counts on self.clm_y and changes self.dts in place
:param flt_smp: downsample the larger label class so its size is at most flt_smp times the smaller class
:return: None
"""
# control the sample size of each class
stm_1 = self.dts.loc[self.dts[self.clm_y[0]] == 1]
stm_0 = self.dts.loc[self.dts[self.clm_y[0]] == 0]
shp_0, shp_1 = stm_0.shape[0], stm_1.shape[0]
try:
stm_0 = stm_0.sample(int(shp_1 * flt_smp)) if shp_1*flt_smp < shp_0 else stm_0
stm_1 = stm_1.sample(int(shp_0 * flt_smp)) if shp_0*flt_smp < shp_1 else stm_1
except ValueError:
pass
self.dts = pd_concat([stm_1, stm_0])
self.mdl_nit()
class mdz_fct(mdz_nit):
"""
model factor analysis
clustering validation: https://blog.csdn.net/u010159842/article/details/78624135
>>> mdl = mdz(DataFrame(),dfz_mpt=dfz(),dct_mdl={'_id':'','fld':'','clm_x':[],'clm_y':[],'drp':None,'kep':None})
>>> mdl.drp_dcm_na(mdl.clm_x) # drop rows with missing values
>>> mdl.mdl_nit() # refresh the parameter state; run whenever rows/columns of self.dts change
>>> mdl.mdl_smp_mpt() # build the dataset from the parameter set [None, 'smp_trn', 'smp_tst', 'smp_raw']
>>> mdl.rnd_frs_clf() # random-forest feature importance check
>>> mdl.fct_fit(10, prm='chksav') # fit the model ['chk', 'sav']
>>> mdl.fct_transform(['电话积极','带看高效','上线稳定','电话稳定','在岗稳定','带看稳定']) # apply the model
"""
def __init__(self, dts=None, lcn=None, *, spr=False, dfz_mpt=None, dct_mdl=None):
"""
more param of self is available, in [clm_x, clm_y, _x, _y, mdl]
:param dts: self.dts, takes precedence over dfz_mpt
:param lcn: self.lcn, takes precedence over dfz_mpt
:param spr: let self = self.dts
:param dfz_mpt: a dfz for inserted
:param dct_mdl: a dict of params, keys in ['_id', 'fld', 'clm_y', 'clm_x', 'drp', 'kep']
:param dct_mdl: more params are auto generated, in ['fls', 'smp_raw/_y_0/_y_1/_trn/_tst']
"""
super(mdz_fct, self).__init__(dts, lcn, spr=spr, dfz_mpt=dfz_mpt, dct_mdl=dct_mdl)
def fct_fit(self, nfc=3, mtd='principal', rtt='varimax', *, prm='chksav'):
"""
factor fit step, to check or save model
:param nfc: number of factors, default 3
:param mtd: method, default 'principal'
:param rtt: rotation method, default 'varimax'
:param prm: in ['chk', 'sav', ''] for [model check, save to file, import self.mdl only]
:return: None
"""
self.mdl = FactorAnalyzer(n_factors=nfc, method=mtd, rotation=rtt)
self.mdl.fit(self._x)
if re_find('chk', prm):
self._ct_fit_chk()
if re_find('sav', prm):
self.mdl_sav()
def _ct_fit_chk(self, prm='kmocmmegnvrnlod'):
"""
several common factor-analysis checks
:param prm: ['kmo/cmm/egn/vrn/lod'] for kmo, communalities, eigenvalues, variance, load matrix
:return: indicators and pictures in prm
"""
if re_find('kmo', prm):
print('kmo %.4f; bartlett %.4f.' % (self._ct_fit_chk_kmo())) # kmo
if re_find('cmm', prm):
print('communalities: %s' % self.mdl.get_communalities()) # communalities (shared variance)
if re_find('egn', prm):
self._ct_fit_chk_egn() # eigenvalue (scree) plot
if re_find('vrn', prm):
self._ct_fit_chk_vrn() # cumulative variance explained by the factors
if re_find('lod', prm):
self._ct_fit_chk_lod() # loading matrix for factor importance and naming
def _ct_fit_chk_kmo(self):
"""
KMO and Bartlett tests; the former should be as large as possible, the latter should be < 0.05
:return: [kmo_model, bartlett]
"""
kmo_all, kmo_model = calculate_kmo(self._x)
bartlett = round(calculate_bartlett_sphericity(self._x)[1], 4)
return kmo_model, bartlett
def _ct_fit_chk_egn(self):
"""
eigenvalues to check factor number
:return: a picture of eigenvalues
"""
ev, v = self.mdl.get_eigenvalues() # eigenvalues; usually factors with eigenvalue > 1 are kept
plt.scatter(range(1, ev.shape[0] + 1), ev)
plt.plot(range(1, ev.shape[0] + 1), ev)
plt.title('Scree Plot')
plt.xlabel('Factors')
plt.ylabel('Eigenvalue')
plt.grid()
plt.show()
def _ct_fit_chk_vrn(self):
"""
variance plot to check factor number
:return: a picture of variance
"""
vrn = self.mdl.get_factor_variance() # cumulative variance contribution of the factors
plt.scatter(range(1, vrn[2].shape[0] + 1), vrn[2])
plt.plot(range(1, vrn[2].shape[0] + 1), vrn[2])
plt.title('Scree Plot')
plt.xlabel('Factors')
plt.ylabel('variance')
plt.grid()
plt.show()
def _ct_fit_chk_lod(self, *, prn=False):
"""
load plot to check factor number
:param prn: save the picture or not, default False
:return: a picture of load matrix
"""
rtr = Rotator()
load_matrix = rtr.fit_transform(self.mdl.loadings_)
sns.set(font="simhei")
df_cm = DataFrame(np_abs(load_matrix), index=self._x.columns)
plt.figure(figsize=(8, 8))
ax = sns.heatmap(df_cm, annot=True, cmap="BuPu")
ax.yaxis.set_tick_params(labelsize=10) # set the y-axis tick label font size
plt.title('Factor Analysis', fontsize='xx-large')
plt.ylabel('Sepal Width', fontsize='xx-large') # Set y-axis label
plt.show()
if prn:
plt.savefig('factorAnalysis.png', dpi=300)
def fct_transform(self, rnm=None, *, rtn=False):
"""
factor analysis transform by self.mdl, merge factors on the left side of self.dts.
:param rnm: new name of factors in list
:param rtn: if return factors or not, default False
:return: if rtn is True, return factors in type dataframe
"""
if self.mdl is None:
self.mdl_lod()
vrn = self.mdl.get_factor_variance()
dtf_fct = self.mdl.transform(self._x)
# build factor names
dct_fct = {i: 'fct_' + str(i) for i in range(len(vrn[0]))}
if rnm is not None:
dct_tmp = {i: rnm[i] for i in range(len(rnm))} if type(rnm) in [list] else rnm.copy()
dct_fct.update(dct_tmp)
# compute the weighted total score
lst = []
for i in dtf_fct:
lst.append(sum([i[j] * vrn[1][j] for j in range(len(i))]))
dtf_fnl = | DataFrame(dtf_fct, index=self._x.index) | pandas.DataFrame |
#!/usr/bin/env python
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import numpy as np
import pandas as pd
from typeI_analysis import mae, rmse, barplot_with_CI_errorbars
from typeI_analysis import compute_bootstrap_statistics
import shutil
import seaborn as sns
from matplotlib import pyplot as plt
# =============================================================================
# PLOTTING FUNCTIONS
# =============================================================================
def barplot_with_CI_errorbars_and_2groups(df1, df2, x_label, y_label, y_lower_label, y_upper_label):
"""Creates bar plot of a given dataframe with asymmetric error bars for y axis.
Args:
df: Pandas Dataframe that should have columns with columnnames specified in other arguments.
x_label: str, column name of x axis categories
y_label: str, column name of y axis values
y_lower_label: str, column name of lower error values of y axis
y_upper_label: str, column name of upper error values of y axis
"""
# Column names for new columns for delta y_err which is calculated as | y_err - y |
delta_lower_yerr_label = r"$\Delta$" + y_lower_label
delta_upper_yerr_label = r"$\Delta$" + y_upper_label
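# e.g. a MAE of 0.50 with a 95% CI of [0.40, 0.65] gives error-bar lengths of 0.10 (lower) and 0.15 (upper)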
# Color
current_palette = sns.color_palette()
#current_palette = sns.color_palette("GnBu_d")
error_color = sns.color_palette("GnBu_d")[0]
# Plot style
plt.close()
plt.style.use(["seaborn-talk", "seaborn-whitegrid"])
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 16
plt.tight_layout()
bar_width = 0.45
# Plot 1st group of data
data = df1 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
x = range(len(data[y_label]))
y = data[y_label]
plt.bar(x, y, label = "QM", width=bar_width, color=current_palette[0])
plt.xticks(x, data[x_label], rotation=90)
plt.errorbar(x, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=error_color, capsize=3, capthick=True, elinewidth=1.5)
# Plot 2nd group of data
data = df2 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
index = np.arange(df2.shape[0])
x = range(len(data[y_label]))
y = data[y_label]
#plt.bar(x, y)
plt.bar(index + bar_width, y, label = "Empirical", width=bar_width, color=sns.color_palette("BuGn_r")[3])
plt.xticks(index + bar_width/2, data[x_label], rotation=90)
plt.errorbar(index + bar_width, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=sns.color_palette("BuGn_r")[1], capsize=3, capthick=True, elinewidth=1.5)
plt.xlabel(x_label)
plt.ylabel(y_label)
def barplot_with_CI_errorbars_and_1st_of_2groups(df1, df2, x_label, y_label, y_lower_label, y_upper_label):
"""Creates bar plot of a given dataframe with asymmetric error bars for y axis.
Args:
df: Pandas Dataframe that should have columns with columnnames specified in other arguments.
x_label: str, column name of x axis categories
y_label: str, column name of y axis values
y_lower_label: str, column name of lower error values of y axis
y_upper_label: str, column name of upper error values of y axis
"""
# Column names for new columns for delta y_err which is calculated as | y_err - y |
delta_lower_yerr_label = r"$\Delta$" + y_lower_label
delta_upper_yerr_label = r"$\Delta$" + y_upper_label
# Color
current_palette = sns.color_palette()
#current_palette = sns.color_palette("GnBu_d")
error_color = sns.color_palette("GnBu_d")[0]
# Plot style
plt.close()
plt.style.use(["seaborn-talk", "seaborn-whitegrid"])
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 16
plt.tight_layout()
bar_width = 0.45
# Plot 1st group of data
data = df1 # Pandas DataFrame
data[delta_lower_yerr_label] = data[y_label] - data[y_lower_label]
data[delta_upper_yerr_label] = data[y_upper_label] - data[y_label]
x = range(len(data[y_label]))
y = data[y_label]
plt.bar(x, y, label = "QM", width=bar_width, color=current_palette[0])
plt.xticks(x, data[x_label], rotation=90)
plt.errorbar(x, y, yerr=(data[delta_lower_yerr_label], data[delta_upper_yerr_label]),
fmt="none", ecolor=error_color, capsize=3, capthick=True, elinewidth=1.5)
#index = np.arange(df2.shape[0])
#plt.xticks(index + bar_width/2, data[x_label], rotation=90)
plt.xlabel(x_label)
plt.ylabel(y_label)
def stacked_barplot_2groups(df, x_label, y_label1, y_label2, fig_size=(10, 7), invert=False):
# Color
grays = ["#95a5a6", "#34495e"]
current_palette = sns.color_palette(grays)
# Plot style
plt.close()
plt.style.use(["seaborn-talk", "seaborn-whitegrid"])
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 16
plt.tight_layout()
bar_width = 0.70
plt.figure(figsize=fig_size)
data = df # Pandas DataFrame
x = range(len(data[x_label]))
y1 = data[y_label1]
y2 = data[y_label2]
p1 = plt.bar(x, y1, width=bar_width, color=current_palette[0])
p2 = plt.bar(x, y2, width=bar_width, bottom=y1, color=current_palette[1])
plt.xticks(x, data[x_label], rotation=90)
plt.xlabel(x_label)
plt.ylabel("number of $pK_{a}s$")
plt.legend((p1[0], p2[0]), (y_label1, y_label2))
# Flip plot upside down
if invert == True:
ax = plt.gca()
ax.invert_yaxis()
# =============================================================================
# CONSTANTS
# =============================================================================
# Paths to input data.
PKA_TYPEI_CLOSEST_COLLECTION_PATH = './analysis_outputs_closest/typeI_submission_collection.csv'
PKA_TYPEI_HUNGARIAN_COLLECTION_PATH = './analysis_outputs_hungarian/typeI_submission_collection.csv'
PKA_TYPEI_CLOSEST_FULL_COLLECTION_PATH = './analysis_outputs_closest/typeI_submission_full_collection.csv'
PKA_TYPEI_HUNGARIAN_FULL_COLLECTION_PATH = './analysis_outputs_hungarian/typeI_submission_full_collection.csv'
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def read_collection_file(matching_algorithm):
"""
Function to read SAMPL6 collection CSV file that was created by pKaTypeISubmissionCollection.
:param matching_algorithm: 'closest' or 'hungarian'
:return: Pandas DataFrame
"""
# Select collection file path
if matching_algorithm == 'closest':
collection_file_path = PKA_TYPEI_CLOSEST_COLLECTION_PATH
elif matching_algorithm == 'hungarian':
collection_file_path = PKA_TYPEI_HUNGARIAN_COLLECTION_PATH
else:
raise Exception("Correct matching algorithm not specified. Should be 'closest' or 'hungarian', or both.")
# Check if submission collection file already exists.
if os.path.isfile(collection_file_path):
print("Analysis will be done using the existing collection file: {}".format(collection_file_path))
collection_df = pd.read_csv(collection_file_path, index_col=0)
print("\n SubmissionCollection: \n")
print(collection_df)
else:
raise Exception("Collection file doesn't exist: {}".format(collection_file_path))
return collection_df
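# Illustrative usage: collection_df = read_collection_file(matching_algorithm='closest')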
def read_full_collection_file(matching_algorithm):
"""
Function to read SAMPL6 full collection CSV file that was created by pKaTypeISubmissionCollection.
Full collection has entries of matched prediction and also unmatched predictions and unmatched
experimental pKas for each submission.
:param matching_algorithm: 'closest' or 'hungarian'
:return: Pandas DataFrame
"""
# Select collection file path
if matching_algorithm == 'closest':
collection_file_path = PKA_TYPEI_CLOSEST_FULL_COLLECTION_PATH
elif matching_algorithm == 'hungarian':
collection_file_path = PKA_TYPEI_HUNGARIAN_FULL_COLLECTION_PATH
else:
raise Exception("Correct matching algorithm not specified. Should be 'closest' or 'hungarian', or both.")
# Check if submission collection file already exists.
if os.path.isfile(collection_file_path):
print("Analysis will be done using the existing collection file: {}".format(collection_file_path))
collection_df = pd.read_csv(collection_file_path, index_col=0)
print("\n SubmissionFullCollection: \n")
print(collection_df)
else:
raise Exception("Collection file doesn't exist: {}".format(collection_file_path))
return collection_df
def calc_MAE_for_molecules_across_all_predictions(collection_df, directory_path, file_base_name):
"""
Calculate mean absolute error for each molecule for all methods.
:param collection_df: Pandas DataFrame of submission collection.
:param directory_path: Directory for outputs
:param file_base_name: Filename for outputs
:return:
"""
# Create list of Molecule IDs
mol_IDs= list(set(collection_df["Molecule ID"].values)) # List of unique IDs
mol_IDs.sort()
print(mol_IDs)
# List for keeping records of stats values for each molecule
molecular_statistics = []
# Slice the dataframe for each molecule to calculate MAE
for mol_ID in mol_IDs:
collection_df_mol_slice = collection_df.loc[collection_df["Molecule ID"] == mol_ID]
# 2D array of matched calculated and experimental pKas
data = collection_df_mol_slice[["pKa (calc)", "pKa (exp)"]].values
# Calculate mean absolute error
#MAE_value = mae(data)
# Calculate MAE and RMSE and their 95% confidence intervals
bootstrap_statistics = compute_bootstrap_statistics(samples=data, stats_funcs=[mae, rmse], percentile=0.95,
n_bootstrap_samples=10000)
MAE = bootstrap_statistics[0][0]
MAE_lower_CI = bootstrap_statistics[0][1][0]
MAE_upper_CI = bootstrap_statistics[0][1][1]
print("{} MAE: {} [{}, {}]".format(mol_ID, MAE, MAE_lower_CI, MAE_upper_CI))
RMSE = bootstrap_statistics[1][0]
RMSE_lower_CI = bootstrap_statistics[1][1][0]
RMSE_upper_CI = bootstrap_statistics[1][1][1]
print("{} RMSE: {} [{}, {}]\n".format(mol_ID, RMSE, RMSE_lower_CI, RMSE_upper_CI))
# Record in CSV file
molecular_statistics.append({'Molecule ID': mol_ID, 'MAE': MAE, 'MAE_lower_CI': MAE_lower_CI,
'MAE_upper_CI': MAE_upper_CI, 'RMSE': RMSE, 'RMSE_lower_CI': RMSE_lower_CI,
'RMSE_upper_CI': RMSE_upper_CI})
# Convert dictionary to Dataframe to create tables/plots easily and save as CSV.
molecular_statistics_df = pd.DataFrame(molecular_statistics)
#molecular_statistics_df.set_index('Molecule ID', inplace=True)
# Sort values by MAE values
molecular_statistics_df.sort_values(by='MAE', inplace=True)
# Create CSV
os.makedirs(directory_path)
file_base_path = os.path.join(directory_path, file_base_name)
with open(file_base_path + '.csv', 'w') as f:
molecular_statistics_df.to_csv(f)
# Plot MAE and RMSE of each molecule across predictions as a bar plot
barplot_with_CI_errorbars(df = molecular_statistics_df, x_label = 'Molecule ID',
y_label = 'MAE', y_lower_label = 'MAE_lower_CI', y_upper_label = 'MAE_upper_CI')
plt.savefig(directory_path + "/MAE_vs_molecule_plot.pdf")
barplot_with_CI_errorbars(df=molecular_statistics_df, x_label = 'Molecule ID',
y_label = 'RMSE', y_lower_label = 'RMSE_lower_CI', y_upper_label = 'RMSE_upper_CI')
plt.savefig(directory_path + "/RMSE_vs_molecule_plot.pdf")
def select_subsection_of_collection(collection_df, method_df, method_group):
"""
Returns a dataframe which is the subset of rows of the collection dataframe that match the requested method group:
QM or Empirical.
:param collection_df: Pandas DataFrame of submission collection.
:param method_df: Pandas DataFrame of method map file
:param method_group: String that specifies with method group is requested. "QM" or "Empirical"
:return: Pandas DataFrame of subsection of submission collection.
"""
print("Looking for submissions of selected method group...")
print("Method group: {}".format(method_group))
methods_of_selected_group = list()
if method_group == "QM":
methods_of_selected_group = ["QM", "QM + MM", "QM + LEC"]
elif method_group == "Empirical":
methods_of_selected_group = ["LFER", "QSPR/ML", "DL"]
else:
print("Specify method group as 'QM' or 'Empirical'.")
print("methods_of_selected_group:{}".format(methods_of_selected_group))
# Collect submission IDs of QM or empirical based methods from method map
submisssion_IDs_of_selected_group = list()
# Iterate through method map
for i in range(method_df.shape[0]):
method = method_df.loc[i,"Detailed Method Category"]
if method in methods_of_selected_group:
# Check columns "typeI submission ID", "typeII submission ID", and "typeI submission ID"
# to collect submission IDs of submission of each method group
#typeI submission ID
sub_id = method_df.loc[i, "typeI submission ID"]
print("sub_id: {}, method: {}".format(sub_id, method))
# If sub_id exists, add it to the submission ID list
try:
if len(sub_id) >3 :
submisssion_IDs_of_selected_group.append(sub_id)
except TypeError:
print("No Submission ID found.")
# typeII submission ID
sub_id = method_df.loc[i, "typeII submission ID"]
print("sub_id: {}, method: {}".format(sub_id, method))
# If sub_id exists, add it to the submission ID list
try:
if len(sub_id) > 3:
submisssion_IDs_of_selected_group.append(sub_id)
except TypeError:
print("No Submission ID found.")
# typeIII submission ID
sub_id = method_df.loc[i, "typeIII submission ID"]
print("sub_id: {}, method: {}".format(sub_id, method))
# If sub_id exists, add it to the submission ID list
try:
if len(sub_id) > 3:
submisssion_IDs_of_selected_group.append(sub_id)
except TypeError:
print("No Submission ID found.")
print("Submisssion_IDs_of_selected_group: {} \n".format(submisssion_IDs_of_selected_group))
# Filter collection dataframe based on submission IDs(receipt IDs) of selected method group
collection_df_of_selected_method_group = collection_df[collection_df["receipt_id"].isin(submisssion_IDs_of_selected_group)]
print("collection_df_of_selected_method_group: \n {}".format(collection_df_of_selected_method_group))
return collection_df_of_selected_method_group
def calc_MAE_for_molecules_across_selected_predictions(collection_df, method_df, selected_method_group, directory_path, file_base_name):
"""
Calculates mean absolute error for each molecule across prediction methods based on QM (QM, QM+LEC, QM+MM)
:param collection_df: Pandas DataFrame of submission collection.
:param method_df: Pandas DataFrame of method map.
:param selected_method_group: "QM" or "Empirical"
:param directory_path: Directory path for outputs
:param file_base_name: Output file name
:return:
"""
# Create subsection of collection dataframe for selected methods
collection_df_subset = select_subsection_of_collection(collection_df=collection_df, method_df=method_df, method_group=selected_method_group)
subset_directory_path = os.path.join(directory_path, selected_method_group)
# Calculate MAE using subsection of collection database
calc_MAE_for_molecules_across_all_predictions(collection_df=collection_df_subset, directory_path=subset_directory_path, file_base_name=file_base_name)
def create_comparison_plot_of_molecular_MAE_of_method_groups(directory_path, group1, group2, file_base_name):
#group1 = "QM"
#group2 = "Empirical"
# Read MAE dataframes
df_qm = pd.read_csv(directory_path + "/" + group1 + "/molecular_error_statistics_for_QM_methods.csv" )
df_empirical = pd.read_csv(directory_path + "/" + group2 + "/molecular_error_statistics_for_empirical_methods.csv")
# Reorder dataframes based on the order of molecular MAE statistic of QM methods
ordered_molecule_list = list(df_qm["Molecule ID"])
print("ordered_molecule_list: \n", ordered_molecule_list)
df_empirical_reordered = df_empirical.set_index("Molecule ID")
df_empirical_reordered = df_empirical_reordered.reindex(index=df_qm['Molecule ID'])
df_empirical_reordered = df_empirical_reordered.reset_index()
# Plot
# Molecular labels will be taken from 1st dataframe, so the second dataframe should have the same molecule ID order.
barplot_with_CI_errorbars_and_2groups(df1=df_qm, df2=df_empirical_reordered, x_label="Molecule ID", y_label="MAE",
y_lower_label="MAE_lower_CI", y_upper_label="MAE_upper_CI")
plt.savefig(molecular_statistics_directory_path + "/" + file_base_name + ".pdf")
# Same comparison plot with only QM results (only for presentation effects)
barplot_with_CI_errorbars_and_1st_of_2groups(df1=df_qm, df2=df_empirical_reordered, x_label="Molecule ID", y_label="MAE",
y_lower_label="MAE_lower_CI", y_upper_label="MAE_upper_CI")
plt.savefig(molecular_statistics_directory_path + "/" + file_base_name + "_only_QM.pdf")
def calculate_unmatched_pKa_statistics(full_collection_df, directory_path, file_base_name, merged_file_base_name):
# Slice dataframe by receipt ID
receipt_IDs = set(full_collection_df["receipt_id"])
unmatched_pKa_statistics = []
for receipt_ID in receipt_IDs:
df_1method = full_collection_df[full_collection_df["receipt_id"] == receipt_ID]
# print("Full collection of submission {}:".format(receipt_ID))
# print(df_1method)
print("\nAnalyzing full collection of submission {} to determine the number of unmatched pKas:".format(
receipt_ID))
# How many unmatched experimental pKas are recorded?
df_1method_unmatched_exp = df_1method[df_1method["pKa (calc)"] == "--"]
num_unmatched_exp_pKa = df_1method_unmatched_exp.shape[0]
# print("\ndf_1method_unmatched_exp:\n", df_1method_unmatched_exp)
print("Number of unmatched experimental pKa:", num_unmatched_exp_pKa)
# How many unmatched predicted pKas are recorded?
df_1method_unmatched_pred = df_1method[df_1method["pKa (exp)"] == "--"]
num_unmatched_pred_pKa = df_1method_unmatched_pred.shape[0]
# print("\ndf_1method_unmatched_pred:\n", df_1method_unmatched_pred)
# print("num_unmatched_pred_pKa:", num_unmatched_pred_pKa )
# How many unmatched predicted pKas are recorded between pKa 2-12?
df_1method_unmatched_pred['pKa (calc)'] = df_1method_unmatched_pred['pKa (calc)'].astype(float)
df_1method_unmatched_pred_2 = df_1method_unmatched_pred[2.0 <= df_1method_unmatched_pred["pKa (calc)"]]
df_1method_unmatched_pred_2_12 = df_1method_unmatched_pred_2[df_1method_unmatched_pred_2["pKa (calc)"] <= 12.0]
# print("\ndf_1method_unmatched_pred_2_12:\n", df_1method_unmatched_pred_2_12)
num_unmatched_pred_pKa_2_12 = df_1method_unmatched_pred_2_12.shape[0]
print("Number of unmatched predicted pKa between 2-12:", num_unmatched_pred_pKa_2_12)
# How many unmatched predicted pKas are recorded between pKa 4-10?
df_1method_unmatched_pred['pKa (calc)'] = df_1method_unmatched_pred['pKa (calc)'].astype(float)
df_1method_unmatched_pred_4 = df_1method_unmatched_pred[4.0 <= df_1method_unmatched_pred["pKa (calc)"]]
df_1method_unmatched_pred_4_10 = df_1method_unmatched_pred_4[df_1method_unmatched_pred_4["pKa (calc)"] <= 10.0]
# print("\ndf_1method_unmatched_pred_4_10:\n", df_1method_unmatched_pred_4_10)
num_unmatched_pred_pKa_4_10 = df_1method_unmatched_pred_4_10.shape[0]
print("Number of unmatched predicted pKa between 4-10:", num_unmatched_pred_pKa_4_10)
# Append to a list to later save as a CSV
unmatched_pKa_statistics.append({
'ID': receipt_ID,
'unmatched exp pKas': num_unmatched_exp_pKa,
'unmatched pred pKas': num_unmatched_pred_pKa,
'unmatched pred pKas [2,12]': num_unmatched_pred_pKa_2_12,
'unmatched pred pKas [4,10]': num_unmatched_pred_pKa_4_10
})
# Transform into Pandas DataFrame.
df_unmatched_pKa_statistics = pd.DataFrame(data=unmatched_pKa_statistics)
unmatched_pKa_statistics_filename = directory_path + "/" + file_base_name + ".csv"
df_unmatched_pKa_statistics.to_csv(unmatched_pKa_statistics_filename, index=False)
# Merge statistics table and unmatched pKa statistics table and save as a new file
statistics_filename = statistics_directory_path + '/statistics.csv'
df_statistics = | pd.read_csv(statistics_filename, index_col=False) | pandas.read_csv |
"""
Market Data Presenter.
This module contains implementations of the DataPresenter abstract class, which
is responsible for presenting data in the form of mxnet tensors. Each
implementation presents a different subset of the available data, allowing
different models to make use of similar data.
"""
from typing import Dict, List, Optional, Tuple
from abc import abstractmethod
import pandas as pd
import numpy as np
from mxnet import ndarray as nd
from . import providers, utils
class DataPresenter:
"""
Abstract class defining the DataProvider API.
"""
@abstractmethod
def get_training_batch(self, size: int):
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
@abstractmethod
def get_validation_batch(self, size: int):
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
@abstractmethod
def data_array(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
@abstractmethod
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
@abstractmethod
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
class IntradayPresenter:
"""
Loads data consisting only of intraday information, guaranteed to keep all
within market hours.
"""
# All it does is load data - no other calls necessary
# pylint: disable=too-few-public-methods
def __init__(self, provider: providers.DataProvider, *, window: int = 45,
valid_seed: int = 0, lookahead: int = 10,
normalize: bool = True, features: Dict[str, bool] = {},
**kwargs):
"""
Init function. Takes a +provider+ from which it extracts data and
a variety of other arguments. See info files for examples.
"""
# pylint: disable=too-many-instance-attributes
# Store basic setup parameters
self.provider = provider
self._window = window
self._valid_seed = valid_seed
self._lookahead = lookahead
self._normalize = normalize
self._features = [feat for feat in features if features[feat]]
self._outputs = []
# Collect and decide features
for feature in self._features:
# First handle special features
if feature == 'macd':
self._outputs.append('macd_signal')
if feature == 'vortex':
self._outputs.extend(['vortex+', 'vortex-'])
continue
if feature == 'stochastic':
self._outputs.extend(['%K', '%D'])
continue
if feature == 'williams':
self._outputs.append('%R')
continue
if feature == 'dysart':
self._outputs.extend(['pvi', 'nvi'])
continue
if feature == 'bollinger':
self._outputs.extend(['bollinger+', 'bollinger=', 'bollinger-'])
continue
# Then add all others
self._outputs.append(feature)
# Decide range of possible dates in advance
self._first = provider.first()
# TODO don't limit this anymore
self._latest = provider.latest() - pd.to_timedelta(2, unit='day')
# Cache for already processed data to cut down on disk usage
self._train_cache = {}
self._val_cache = {}
# Cache of holidays to prevent continuously recalculating them
self._holidays = utils.trading_holidays(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
self._half_days = utils.trading_half_days(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
def get_training_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
return self._get_batch(size, validation=False)
def get_validation_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
return self._get_batch(size, validation=True)
def data_array(self, timestamp: pd.Timestamp) -> nd.NDArray:
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
start_time = timestamp - pd.to_timedelta(self._window, unit='min')
return self._get_data(start_time, False)[0]
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
data = self._extract_daily_data(timestamp)
if data is None:
return None
return data.loc[timestamp, :]
def _get_data(self, time: pd.Timestamp, validation: bool) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a simgle data sample starting at a given +time+. Uses
+validation+ to distinguish between training and validation sets.
NOTE: This function assumes that the entire data window is available.
If a time provided is too late to obtain a full window, behavior
is UNPREDICTABLE.
"""
# Check if the sample has already been cached.
day = time.floor('D')
start_index = (time.hour - 9) * 60 + (time.minute - 30)
end_index = start_index + self._window
if validation and day in self._val_cache:
data, target = self._val_cache[day]
return data[start_index: end_index], target[start_index: end_index]
if not validation and day in self._train_cache:
data, target = self._train_cache[day]
return data[start_index: end_index], target[start_index: end_index]
# Otherwise generate, cache, and return it
data, target = self._to_daily_input_data(day)
if validation:
self._val_cache[day] = (data, target)
else:
self._train_cache[day] = (data, target)
return data[start_index: end_index], target[start_index: end_index]
def _to_daily_input_data(self, date: pd.Timestamp) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Transforms a set of intraday data for a +date+ to an array appropriate
for input to the model, and a target set of predictions against which
to compare outputs.
"""
# Gather data requested data components. Note that this seemingly
# over-complicated method guarantees that they remain in the order
# prescribed by the feature list.
datas = []
for feat in self._outputs:
if feat == "high":
datas.append(_to_intraday_high(date, self.provider,
normalize=self._normalize))
elif feat == "low":
datas.append(_to_intraday_low(date, self.provider,
normalize=self._normalize))
elif feat == "change":
datas.append(_to_intraday_change(date, self.provider,
normalize=self._normalize))
elif feat == "open":
datas.append(_to_intraday_open(date, self.provider,
normalize=self._normalize))
elif feat == "volume":
datas.append(_to_intraday_volume(date, self.provider,
normalize=self._normalize))
elif feat == "time":
datas.append(_to_intraday_time(date, self.provider,
normalize=self._normalize))
elif feat == "macd":
# For MACD, include both MACD and its signal
macd, macd_signal = _to_intraday_macd(date, self.provider,
normalize=self._normalize)
datas.extend([macd_signal, macd])
elif feat == "mass_index":
datas.append(_to_intraday_mass_index(date, self.provider))
elif feat == "trix15":
datas.append(_to_intraday_trix(date, self.provider, 15))
elif feat == "vortex+":
vortex_up, vortex_down = _to_intraday_vortex(date,
self.provider, 25)
datas.extend([vortex_up, vortex_down])
elif feat == "%K":
pK, pD = _to_intraday_stochastic(date, self.provider, 30)
datas.extend([pK, pD])
elif feat == "rsi":
datas.append(_to_intraday_rsi(date, self.provider, 14))
elif feat == "%R":
# The Williams %R is mathematically equivalent to (1 - %K). It
# is duplicated here to obtain a shorter period.
pK, _ = _to_intraday_stochastic(date, self.provider, 10)
datas.append(pK - 1)
elif feat == "accdist":
datas.append(_to_intraday_accdist(date, self.provider))
elif feat == "mfi":
datas.append(_to_intraday_mfi(date, self.provider, 30))
elif feat == "vpt":
datas.append(_to_intraday_vpt(date, self.provider))
elif feat == "obv":
datas.append(_to_intraday_obv(date, self.provider))
elif feat == "pvi":
pvi, nvi = _to_intraday_dysart(date, self.provider)
datas.extend([pvi, nvi])
elif feat == "bollinger+":
b_top, b_mid, b_bottom = _to_intraday_bollinger(date,
self.provider,
30, 2)
datas.extend([b_top, b_mid, b_bottom])
elif feat == "ultimate":
datas.append(_to_intraday_ultimate(date, self.provider))
elif feat == "cci":
datas.append(_to_intraday_cci(date, self.provider))
elif feat == "target":
datas.append(_to_intraday_target(date, self.provider,
self._lookahead,
normalize=self._normalize))
# Gather target data and return data/target arrays
target = _to_intraday_target(date, self.provider, self._lookahead,
normalize=self._normalize)
return nd.stack(*datas, axis=1), target.reshape(-1, 1)
def _extract_daily_data(self, date: pd.Timestamp) -> Optional[pd.DataFrame]:
"""
Gets the market data for a given day, restricted to market hours.
"""
data = self.provider.intraday(date)
if data is None or data.empty:
return None
return data
def _get_batch(self, batch_size: int, validation: bool = False) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Gets a random batch of data of size +batch_size+. Returns a tuple of
data and target predictions. If +validation+ is set, prevents these
dates from being drawn for non-validation batches.
"""
# Define a Callable for testing appropriate dates
def _is_suitable_time(time: pd.Timestamp) -> bool:
"""
Returns whether the market is open at a given +time+ for the
required window.
"""
# First, confirm that this date matches the right type
day = time.floor(freq='D')
is_validation_date = (day.dayofyear % 10 == self._valid_seed)
if validation != is_validation_date:
return False
# Ensure it's on weekdays and during market hours. Note that we
# discard the last 10 minutes of trading because they are both
# dangerous for day trading and provide no good way to train the
# 10 minute output for the model.
if time.weekday() > 4:
return False
if (time.hour * 60 + time.minute) < 9 * 60 + 30:
return False
if (time.hour * 60 + time.minute + self._window) > 15 * 60 - self._lookahead:
return False
# Check against holidays. Note that for the sake of sanity, we
# don't include half days.
if day in self._holidays or day in self._half_days:
return False
return True
# Next, generate arrays of random dates within the last two years,
# recording appropriate ones to form an array of size +batch_size+
timestamps = pd.Series()
while True:
random_times = pd.to_datetime(np.random.randint(low=self._first.value,
high=self._latest.value,
size=(100),
dtype='int64')).to_series()
suitable_mask = random_times.apply(_is_suitable_time)
timestamps = pd.concat([timestamps, random_times.loc[suitable_mask]])
if len(timestamps) >= batch_size:
timestamps = timestamps[0 : batch_size]
break
index_array = pd.to_datetime(timestamps)
# Next, gather all data into batches with axes (batch, window, data...)
datas, targets = [], []
for timestamp in index_array:
data, target = self._get_data(timestamp, validation)
datas.append(data)
targets.append(target)
data_array, target_array = nd.stack(*datas), nd.stack(*targets)
# Return the data
return data_array, target_array
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
return self._outputs
def _get_intraday_data(date: pd.Timestamp, provider: providers.DataProvider) \
-> pd.DataFrame:
"""
Gets the intraday datafrome limited to market hours for a given +date+
and +provider+.
"""
# First, get data and limit it to market hours
data = provider.intraday(date)
if data is None or data.empty:
raise RuntimeError(f"Something went wrong - empty data array for {date}!")
start = data.index[0].replace(hour=9, minute=30)
end = data.index[0].replace(hour=16, minute=0)
# Next, resample the data by the minute and interpolate missing values
data = data.loc[data.index.isin(pd.date_range(start=start, end=end, freq='min'))]
data = data.resample('min')
data = data.interpolate(method='time').copy()
return data
def _to_intraday_high(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute high of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
open price.
"""
data = _get_intraday_data(date, provider)
high = ((data.high - data.open) / data.open) if normalize else data.high
return nd.array(high.values, utils.try_gpu(0))
def _to_intraday_low(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute high of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
open price.
"""
data = _get_intraday_data(date, provider)
low = ((data.low - data.open) / data.open) if normalize else data.low
return nd.array(low.values, utils.try_gpu(0))
def _to_intraday_change(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute close of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
previous close
"""
data = _get_intraday_data(date, provider)
close_prev = data.close.shift(periods=1, fill_value=data.close[0])
close = ((data.close - close_prev) / close_prev) if normalize else data.close
return nd.array(close.values, utils.try_gpu(0))
def _to_intraday_open(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute open of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
daily open price.
"""
data = _get_intraday_data(date, provider)
open = (data.open / data.open.iloc[0]) if normalize else data.open
return nd.array(open.values, utils.try_gpu(0))
def _to_intraday_volume(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
    Returns an ndarray consisting of the per-minute volume of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
average volume.
"""
data = _get_intraday_data(date, provider)
vol = data.volume / data.volume.mean() if normalize else data.volume
return nd.array(vol.values, utils.try_gpu(0))
def _to_intraday_time(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the trading minute of a data series for
a given +date+ and +provider+. If +normalize+, it is normalized so that
    9:30 is 0 and 16:00 is 1.
"""
data = _get_intraday_data(date, provider)
minute = data.index.hour * 60 + data.index.minute - (9 * 60 + 30)
    tempus = (minute / (60 * 6 + 30)) if normalize else minute  # 390 minutes from 9:30 to 16:00
return nd.array(tempus.values, utils.try_gpu(0))
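# Worked example for the time normalization above, assuming the regular
# 390-minute session (9:30-16:00): 9:30 -> 0.0, 12:45 -> 0.5, 16:00 -> 1.0.
def _example_time_normalization() -> pd.Series:
    """Normalize a few hand-picked session times to the [0, 1] range."""
    idx = pd.DatetimeIndex(['2019-05-01 09:30', '2019-05-01 12:45', '2019-05-01 16:00'])
    minute = idx.hour * 60 + idx.minute - (9 * 60 + 30)
    return pd.Series(minute / (60 * 6 + 30), index=idx)  # 0.0, 0.5, 1.0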
def _to_intraday_macd(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a pair of ndarrays consisting of the per-minute MACD of a data
series for a given +date+ and +provider+, and a signal for the same. If
    +normalize+, both are divided by the daily open price.
"""
# First, calculate the MACD via exponential moving averages
data = _get_intraday_data(date, provider)
ewm12 = pd.Series.ewm(data['close'], span=12).mean()
ewm26 = pd.Series.ewm(data['close'], span=26).mean()
macd = ewm26 - ewm12
# Next, calculate the signal line
    signal = pd.Series.ewm(macd, span=9).mean()
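# Standalone sketch (an assumption, not the remainder of the function above):
# the conventional MACD is the fast EMA minus the slow EMA (the code above uses
# the opposite sign, ewm26 - ewm12), and the signal line is a 9-period EMA of
# the MACD itself.
def _example_macd(close: pd.Series) -> Tuple[pd.Series, pd.Series]:
    """Compute a conventional MACD/signal pair from a close-price series."""
    ewm12 = close.ewm(span=12).mean()  # fast EMA
    ewm26 = close.ewm(span=26).mean()  # slow EMA
    macd = ewm12 - ewm26               # fast minus slow
    signal = macd.ewm(span=9).mean()   # 9-period EMA of the MACD
    return macd, signal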
import io
import numpy as np
import pytest
from pandas.compat._optional import VERSIONS
from pandas import (
DataFrame,
date_range,
read_csv,
read_excel,
read_feather,
read_json,
read_parquet,
read_pickle,
read_stata,
read_table,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
text = str(df1.to_csv(index=False)).encode()
@pytest.fixture
def cleared_fs():
fsspec = pytest.importorskip("fsspec")
memfs = fsspec.filesystem("memory")
yield memfs
memfs.store.clear()
def test_read_csv(cleared_fs):
with cleared_fs.open("test/test.csv", "wb") as w:
w.write(text)
    df2 = read_csv("memory://test/test.csv", parse_dates=["dt"])
    tm.assert_frame_equal(df1, df2)
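def test_to_csv_roundtrip_sketch(cleared_fs):
    # Illustrative sketch (an assumption, not part of the original test module):
    # with fsspec's in-memory filesystem available, DataFrame.to_csv can write
    # directly to a "memory://" URL and read_csv can load it back unchanged.
    df1.to_csv("memory://test/roundtrip.csv", index=False)
    df2 = read_csv("memory://test/roundtrip.csv", parse_dates=["dt"])
    tm.assert_frame_equal(df1, df2)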
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
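# Simplified standalone sketch (an assumption, not the gs_quant implementation)
# of the leg-matching rule exercised above: when exactly one leg is a LIBOR leg,
# the other leg's designated maturity is aligned to the LIBOR leg's tenor.
def _match_floating_tenors_sketch(swap_args: dict) -> dict:
    out = dict(swap_args)
    payer_is_libor = 'LIBOR' in str(out['asset_parameters_payer_rate_option'])
    receiver_is_libor = 'LIBOR' in str(out['asset_parameters_receiver_rate_option'])
    if payer_is_libor and not receiver_is_libor:
        out['asset_parameters_receiver_designated_maturity'] = \
            out['asset_parameters_payer_designated_maturity']
    elif receiver_is_libor and not payer_is_libor:
        out['asset_parameters_payer_designated_maturity'] = \
            out['asset_parameters_receiver_designated_maturity']
    return out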
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
    assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
import sys
sys.path.append('/pvc/')
import src.evaluation_utils as evaluation_utils
import utils.utils as utils
import datasets
import pandas as pd
import numpy as np
def save_adapter_metrics(data_paths, language, eval_dataset_name, eval_type, eval_model_path, output_path, nsamples):
train_dataset, dev_dataset, test_dataset, labels = utils.get_datav2(data_paths,
dataset_name=eval_dataset_name)
#if eval_type == 'zero_shot_ccs' and eval_dataset_name == 'codie':
#dataset = datasets.concatenate_datasets([train_dataset, dev_dataset, test_dataset])
if eval_type in ['few_shot_ccs', 'zero_shot_ccs', 'top_codes', 'average']:
dataset = test_dataset
metrics = evaluation_utils.evaluate_adapter(language=language,
model_path=eval_model_path,
model_name="xlm-roberta-base",
labels=labels,
dataset=dataset,
dataset_name=eval_dataset_name)
if eval_type != 'average':
result = evaluation_utils.compute_metrics(metrics,
eval_type=eval_type,
data_paths=data_paths,
dataset_name=eval_dataset_name,
nsamples=nsamples)
else:
result = dict()
result['avg_pr_auc'] = metrics['eval_val_pr_auc']
result['avg_micro_auc'] = metrics['eval_micro_auc']
result['avg_auc'] = metrics['eval_val_auc']
import numpy as np
print('avg_auc', np.round(result['avg_auc']*100, 2))
print('avg_micro_auc', np.round(result['avg_micro_auc']*100,2))
print('avg_pr_auc', np.round(result['avg_pr_auc']*100,2))
#evaluation_utils.save_metric(metric=result,
# metric_path=output_path.format(f"{language}_{eval_dataset_name}_{eval_type}_metrics"))
return result
if __name__ == '__main__':
eval_models = evaluation_utils.get_best_adapter_model_paths(mname=None)
output_path = "/pvc/tasks/codie_ccs_based_data/evaluation_metrics/{}.pcl"
# SETTINGS
nfold = 3
filter_set_name = 'ccs_codie'
translator_data_selector=None #'Opus_es_to_en'
eval_dataset_name = 'codie'
language = 'spanish'
mname = 'mimic_achepa_codie_MLA'
eval_type = 'few_shot_ccs' #'zero_shot_ccs' #average #few_shot_ccs, top_codes
eval_model_path = eval_models[mname]
data_paths = evaluation_utils.get_data_paths(nfold, filter_set_name, eval_dataset_name, translator_data_selector)
    # Available mname options:
    # 'achepa_original_SLA'
    # 'codie_original_SLA'
    # 'mimic_original_SLA'
    # 'mimic_achepa_MLA'
    # 'mimic_achepa_codie_MLA'
    # 'mimic_codie_MLA'
    # 'mimic_codie_MLA_full_ft'
    # 'achepa_codie_MLA'
if eval_type == 'few_shot_ccs':
#{'min':1, 'max':5},
groups = [{'min':0, 'max':10}, {'min': 11, 'max':50},
{'min': 51, 'max':100}, {'min': 101, 'max':1e4}]
if eval_dataset_name == 'codie':
#groups = [{'min': 0, 'max':0}] + groups
plot_df = pd.DataFrame(np.zeros((4)))
else:
plot_df = pd.DataFrame(np.zeros((4)))
for idx,nsamples in enumerate(groups):
result = save_adapter_metrics(data_paths,
language=language,
eval_dataset_name=eval_dataset_name,
eval_type=eval_type,
eval_model_path=eval_model_path,
output_path=output_path,
nsamples=nsamples,)
plot_df.iloc[idx] = result['avg_auc']
result = dict([(k,result[k]) for k in result if k not in ['avg_auc', 'avg_micro_auc', 'avg_pr_auc']])
            result_df = pd.DataFrame.from_dict(result)
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA.Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER).Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids, thousands of cubic
# feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
    for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple containing the glue tables that link EIA and FERC plants and
    utilities together within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping
    partition types (sub-keys) to partitions (sub-values), such as the tuples
    of years for each data source that can be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of integer-type
    column names (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? Removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': | pd.Int64Dtype() | pandas.Int64Dtype |
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from numpy.testing import assert_allclose, assert_almost_equal
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from .. import timeseries
from .. import utils
DECIMAL_PLACES = 8
class TestDrawdown(TestCase):
drawdown_list = np.array(
[100, 90, 75]
) / 10.
dt = pd.date_range('2000-1-3', periods=3, freq='D')
drawdown_serie = pd.Series(drawdown_list, index=dt)
@parameterized.expand([
(drawdown_serie,)
])
def test_get_max_drawdown_begins_first_day(self, px):
rets = px.pct_change()
drawdowns = timeseries.gen_drawdown_table(rets, top=1)
self.assertEqual(drawdowns.loc[0, 'net drawdown in %'], 25)
drawdown_list = np.array(
[100, 110, 120, 150, 180, 200, 100, 120,
160, 180, 200, 300, 400, 500, 600, 800,
900, 1000, 650, 600]
) / 10.
dt = pd.date_range('2000-1-3', periods=20, freq='D')
drawdown_serie = pd.Series(drawdown_list, index=dt)
@parameterized.expand([
(drawdown_serie,
pd.Timestamp('2000-01-08'),
pd.Timestamp('2000-01-09'),
pd.Timestamp('2000-01-13'),
50,
pd.Timestamp('2000-01-20'),
| pd.Timestamp('2000-01-22') | pandas.Timestamp |
import numpy as np
import pandas as pd
from pycytominer import aggregate
from pycytominer.cyto_utils import infer_cp_features
# Build data to use in tests
data_df = pd.concat(
[
pd.DataFrame({"g": "a", "Cells_x": [1, 3, 8], "Nuclei_y": [5, 3, 1]}),
pd.DataFrame({"g": "b", "Cells_x": [1, 3, 5], "Nuclei_y": [8, 3, 1]}),
]
).reset_index(drop=True)
data_missing_df = pd.concat(
[
pd.DataFrame(
{"g": "a", "Cells_x": [1, 3, 8, np.nan], "Nuclei_y": [5, np.nan, 3, 1]}
),
pd.DataFrame(
{"g": "b", "Cells_x": [1, 3, np.nan, 5], "Nuclei_y": [np.nan, 8, 3, 1]}
),
]
).reset_index(drop=True)
features = infer_cp_features(data_df)
dtype_convert_dict = {x: float for x in features}
def test_aggregate_median_allvar():
"""
Testing aggregate pycytominer function
"""
aggregate_result = aggregate(
population_df=data_df, strata=["g"], features="infer", operation="median"
)
expected_result = pd.concat(
[
pd.DataFrame({"g": "a", "Cells_x": [3], "Nuclei_y": [3]}),
pd.DataFrame({"g": "b", "Cells_x": [3], "Nuclei_y": [3]}),
]
).reset_index(drop=True)
expected_result = expected_result.astype(dtype_convert_dict)
assert aggregate_result.equals(expected_result)
def test_aggregate_mean_allvar():
"""
Testing aggregate pycytominer function
"""
aggregate_result = aggregate(
population_df=data_df, strata=["g"], features="infer", operation="mean"
)
expected_result = pd.concat(
[
pd.DataFrame({"g": "a", "Cells_x": [4], "Nuclei_y": [3]}),
| pd.DataFrame({"g": "b", "Cells_x": [3], "Nuclei_y": [4]}) | pandas.DataFrame |
import unittest
from pandas import DataFrame
from my_lambdata6.assignment import add_state_names_column
class TestMyAssignment(unittest.TestCase):
def test_add_state_names(self):
df = | DataFrame({'abbrev': ['CA', 'CO', 'CT', 'DC', 'TX']}) | pandas.DataFrame |
import warnings
warnings.filterwarnings("ignore")
import sys
import os
import pandas
from gensim.models import Word2Vec
import numpy as np
import torch
import torch.utils.data as Data
from vectorize_patch import PatchVectorizer
from svm_clf import SVM
from transformer_class import Config
from transformer_class import TransformerModel
from transformer_class import train, test_single_file, test
import warnings
warnings.filterwarnings("ignore")
def parse_file(filename):
with open(filename, "r", encoding='utf-8') as file:
patch = []
val = 0
for line in file:
# print(line)
stripped = line.strip()
if not stripped:
continue
if "-" * 40 in line:
yield patch, val
patch = []
elif stripped.split('.')[-1] == 'sol':
continue
elif stripped.split()[0].isdigit():
if stripped.isdigit():
val = int(stripped)
else:
patch.append(" ".join(stripped.split()[1:]))
def get_vectors(train_file, test_file, config):
train_patch = []
test_patch = []
count = 0
vectorizer = PatchVectorizer(config)
for patch, val in parse_file(train_file):
count += 1
print("Collecting train patches...", count, end="\r")
vectorizer.add_patch(patch)
row = {"patch" : patch, "val" : val}
train_patch.append(row)
print()
count = 0
for patch, val in parse_file(test_file):
count += 1
print("Collecting test patches...", count, end="\r")
vectorizer.add_patch(patch)
row = {"patch" : patch, "val" : val}
test_patch.append(row)
print()
print("Training model...")
vectorizer.train_model()
print()
print("Turn to vector...")
vectors = []
count = 0
for patch in train_patch:
count += 1
print("Processing train patches...", count, end="\r")
vector = vectorizer.vectorize(patch["patch"])
row = {"vector" : vector, "val" : patch["val"]}
vectors.append(row)
print()
df = pandas.DataFrame(vectors)
vectors = []
count = 0
for patch in test_patch:
count += 1
print("Processing test patches...", count, end="\r")
vector = vectorizer.vectorize(patch["patch"])
row = {"vector" : vector, "val" : patch["val"]}
vectors.append(row)
print()
df_test = pandas.DataFrame(vectors)
return df, df_test
def train_word2vec(train_file, test_dir, config):
train_patch = []
count = 0
vectorizer = PatchVectorizer(config)
for patch, val in parse_file(train_file):
count += 1
print("Collecting train patches...", count, end="\r")
vectorizer.add_patch(patch)
row = {"patch" : patch, "val" : val}
train_patch.append(row)
print()
count = 0
test_dict = {}
for root, dir, test_files in os.walk(test_dir):
for test_file in test_files:
test_patch = []
test_filepath = os.path.join(root, test_file)
for patch, val in parse_file(test_filepath):
count += 1
print("Collecting patch file...", count, end="\r")
vectorizer.add_patch(patch)
row = {"patch" : patch, "val" : val}
test_patch.append(row)
test_dict[test_file] = test_patch
print()
print("Training model...")
vectorizer.train_model()
print()
print("Generate vector file...")
vectors = []
count = 0
for patch in train_patch:
count += 1
print("Processing train patches...", count, end="\r")
vector = vectorizer.vectorize(patch["patch"])
row = {"vector" : vector, "val" : patch["val"]}
vectors.append(row)
print()
df = pandas.DataFrame(vectors)
df.to_pickle("train_set_vectors.pkl")
del df
count = 0
for filename in test_dict.keys():
vectors = []
test_patch = test_dict[filename]
for patch in test_patch:
count += 1
print("Processing test patches...", count, end="\r")
vector = vectorizer.vectorize(patch["patch"])
row = {"vector" : vector, "val" : patch["val"]}
vectors.append(row)
df_test = | pandas.DataFrame(vectors) | pandas.DataFrame |
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Data for plotting
df = | pd.read_csv('data/primes.txt', header=None, names=['n', 'prime', 'diff']) | pandas.read_csv |
"""
Growth Curve Collation
===========================
This script reads through all experiments within `code/processing/growth_curves/`
and `code/processing/diauxic_shifts/` and collates all data from "accepted" experiments.
`collated_experiment_record_OD600_growth_curves.csv`:
    This is a long-form tidy CSV file with records of all experiments that were
    performed, regardless of their acceptance or rejection, and annotates
    which were used and why. This information is based on the "status" and
    "description" fields in the README.md file associated with each experiment.
`collated_OD600_growth_curves_exponential_phase.csv`:
    This is a long-form tidy CSV file with all growth measurements deemed to
    be in the exponential phase of growth. Only experiments marked as "accepted"
    in the README.md frontmatter are included in this file.
`collated_diauxic_shifts_labeled.csv`:
    This is a long-form tidy CSV file with all diauxic shift measurements for
    all strains and conditions. Only experiments marked as "accepted" in the README.md
    frontmatter are included in this file.
"""
#%%
import numpy as np
import pandas as pd
import futileprot.io
import glob
# Get all of the folders for growth rates
gr_folders = glob.glob('../growth_rates/*')
# Get all of the folders for diauxic shifts
diaux_folders = glob.glob('../diauxic_shifts/*')
# Parse the frontmatter from the README.md files.
valid_paths = [[], []]
record = pd.DataFrame([])
exp_type = ['growth curve measurement', 'diauxic shift measurement']
for i, cat in enumerate([gr_folders, diaux_folders]):
for _, folder in enumerate(cat):
exp_info = futileprot.io.scrape_frontmatter(folder)
exp_info['experiment'] = folder.split('/')[-1]
exp_info['experiment_type'] = exp_type[i]
record = record.append(exp_info, ignore_index=True)
if exp_info['status'].lower() == 'accepted':
valid_paths[i].append(folder)
# Format and store the experiment record
record = record[['experiment', 'experiment_type', 'status', 'description']]
record.sort_values(by='experiment', inplace=True)
record.to_csv('../../../data/collated/collated_experiment_record_OD600_growth_curves.csv',
index=False)
#%%
shift_curves = []
_exp_phase = []
for i, cat in enumerate(valid_paths):
for j, path in enumerate(cat):
if i == 0:
exp_phase = pd.read_csv(f"{path}/output/{path.split('/')[-1]}_exponential_phase.csv")
exp_phase['experiment_type'] = exp_type[i]
exp_phase.rename(columns={'technical_replicate':'replicate',
'od_600nm':'od_600nm_subtracted'}, inplace=True)
_exp_phase.append(exp_phase)
elif i == 1:
exp_phase = pd.read_csv(f"{path}/output/{path.split('/')[-1]}_labeled_regions.csv")
shift_curves.append(exp_phase)
exp_phase.loc[exp_phase['phase']=='exponential_glucose', 'growth_medium'] = 'ga_preshift'
            exp_phase.loc[exp_phase['phase']=='exponential_acetate', 'growth_medium'] = 'acetate'
            # exp_phase.loc[exp_phase['phase']=='exponential_acetate', 'od_600nm_subtracted'] -= shift
dfs = []
for g, d in exp_phase.groupby(['date', 'run_number', 'strain', 'replicate']):
shift = d[d['phase']=='shift']['od_600nm_subtracted'].median()
for _g, _d in d.groupby(['phase']):
if _g != 'exponential_acetate':
# _d['od_600nm_subtracted'] -= shift
dfs.append(_d)
exp_phase = pd.concat(dfs, sort=False)
exp_phase = exp_phase[exp_phase['phase'].str.contains('exponential')]
_exp_phase.append(exp_phase)
shift_curves = | pd.concat(shift_curves, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
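        # min/max follow the order of the categories, not the lexical order
        # of the values, so with reversed categories "d" is the minimum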
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
        # when ordered=False, unique() reorders the categories to the order
        # in which the values appear
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
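        # nbytes counts both the integer codes and the categories array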
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
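        # with object categories, only deep=True accounts for the string
        # payloads, so the deep figure exceeds nbytes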
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
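        # an ordered Categorical should give the same answers as
        # Series.searchsorted on the underlying values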
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
        # for a scalar, Categorical.searchsorted returns an np.array (like
        # pd.Series.searchsorted), unlike np.ndarray.searchsorted, which
        # returns a scalar
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
        # TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever comes first
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
        # TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever comes first
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
        # comparisons with scalars not in categories should raise for
        # ordering comparisons (<, >), but not for equality / inequality
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
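        # the codes are stored in the smallest integer dtype that can hold
        # the number of categories: int8, then int16, then int32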
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
        x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
                          [1, 'John P. Doe']],
                         columns=['person_id', 'person_name'])
        # doing this breaks transform
        x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
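        # unstacking should keep the category dtype in every resulting column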
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
        # reindexing to an index with no overlapping labels gives all-NaN
        # values but keeps the categories
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
        for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
        # Series should delegate calls to '.categories', '.codes', '.ordered'
        # and the methods '.set_categories()' and
        # '.remove_unused_categories()' to the categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
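        # smoke test: a Categorical with unused categories can be wrapped in
        # a Series and put into a DataFrame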
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
        # test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
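        # the Categorical-backed frame should render the same as a plain
        # datetime/period frame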
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats" | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
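# Stand-alone illustration (not part of the original test suite) of the rule the
# assignment tests above exercise: writing a value that is not among the existing
# categories raises ValueError, while values already in the categories are fine.
demo = pd.DataFrame({"cats": pd.Categorical(["a", "a", "a"], categories=["a", "b"]),
                     "values": [1, 1, 1]}, index=["h", "i", "j"])
demo.iloc[2, 0] = "b"        # "b" is an existing category -> allowed
try:
    demo.iloc[1, 0] = "c"    # "c" is not a category -> raises
except ValueError as err:
    print("rejected:", err)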
from scipy import stats
import numpy as np
import pandas as pd
import re
class PWR(object):
def __init__(self, weight=1, regress_to=None, values=None):
self.weight = weight
self.regress_to = regress_to
if values is None:
self.values = None
else:
self.values = values.copy()
def calculate(self, **kwargs):
self.pwrcol = [x for x in list(self.values) if x not in ['Player']][0]
return self
def regress(self, df):
self.values[self.pwrcol] = self.regress_to.regress(df, self.pwrcol)
class SRS(PWR):
def __init__(self, weight=1, regress_to=None):
PWR.__init__(self, weight, regress_to)
def calculate(self, **kwargs):
self.pwrcol = 'SRS'
if kwargs['season'] == 1:
df = kwargs['gamelog'].groupby('Player').agg({'Pts':'mean'})
df = df.rename(columns={'Pts':'SRS'}).reset_index()
self.values = df[['Player','SRS']]
else:
grouped = kwargs['gamelog'].groupby('Player').agg({'Difference':'sum','Opponent':lambda x: list(x)})
grouped['Games Played'] = grouped['Opponent'].str.len()
grouped['Margin'] = grouped['Difference'].values / grouped['Games Played'].values
grouped['SRS'] = grouped['Margin']
grouped['OldSRS'] = grouped['Margin']
players = grouped.to_dict('index')
for i in range(10000):
delta = 0.0
for name, player in players.items():
sos = 0.0
for opponent in player['Opponent']:
sos += players[opponent]['SRS']
players[name]['OldSRS'] = player['SRS']
players[name]['SRS'] = player['Margin'] + (sos / player['Games Played'])
delta = max(delta, abs(players[name]['SRS'] - players[name]['OldSRS']))
if delta < 0.001:
break
srs_sum = 0.0
for name, player in players.items():
srs_sum += players[name]['SRS']
srs_avg = srs_sum / len(players)
for name, player in players.items():
players[name]['SRS'] = player['SRS'] - srs_avg
df = pd.DataFrame.from_dict(players, orient='index').reset_index()
self.values = df.rename({'index':'Player'}, axis=1)[['Player','SRS']]
return self
class PWRsystems(object):
def __init__(self, regress_to=None, srs=None, others=None, scale=None):
self.regress_to = regress_to
self.systems = []
self.scale = None
if isinstance(scale, dict):
self.scale = scale
elif scale:
self.setDefaultScale()
if (srs is None) and (others is None):
self.systems.append(SRS())
else:
pairs = [(srs, SRS)]
for system in [{'Arg':x,'Class':y} for x, y in pairs]:
if type(system['Arg']) is bool:
if system['Arg']:
self.systems.append(system['Class']())
elif system['Arg'] is not None:
self.systems.append(system['Arg'])
if others is not None:
if isinstance(others, PWR):
self.systems.append(others)
else:
for system in others:
self.systems.append(system)
def setDefaultScale(self):
self.scale = {'st_dev':1.05,'mean':0}
def combine(self):
if (len(self.systems) > 1) and (self.scale is None):
self.setDefaultScale()
self.combined = self.systems[0].values[['Player']]
for system in self.systems:
self.combined = pd.merge(self.combined, system.values, on='Player', suffixes=('','_'))
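# Minimal usage sketch for the classes above (illustrative data only). The column
# names ('Player', 'Pts', 'Difference', 'Opponent') and the keyword arguments
# ('gamelog', 'season') are exactly the ones SRS.calculate() expects.
if __name__ == '__main__':
    gamelog = pd.DataFrame({
        'Player': ['A', 'A', 'B', 'B', 'C', 'C'],
        'Opponent': ['B', 'C', 'A', 'C', 'A', 'B'],
        'Pts': [10, 12, 8, 9, 7, 11],
        'Difference': [2, 5, -2, -2, -5, 2],
    })
    ratings = SRS().calculate(gamelog=gamelog, season=2)
    print(ratings.values)  # per-player rating adjusted for strength of schedule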
# -*- coding: utf-8 -*-
"""
.. module:: trend
:synopsis: Trend Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import pandas as pd
import numpy as np
from .utils import *
def macd(close, n_fast=12, n_slow=26, fillna=False):
"""Moving Average Convergence Divergence (MACD)
Is a trend-following momentum indicator that shows the relationship between
two moving averages of prices.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emafast = ema(close, n_fast, fillna)
emaslow = ema(close, n_slow, fillna)
macd = emafast - emaslow
if fillna:
macd = macd.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(macd, name='MACD_%d_%d' % (n_fast, n_slow))
def macd_signal(close, n_fast=12, n_slow=26, n_sign=9, fillna=False):
"""Moving Average Convergence Divergence (MACD Signal)
Shows EMA of MACD.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emafast = ema(close, n_fast, fillna)
emaslow = ema(close, n_slow, fillna)
macd = emafast - emaslow
macd_signal = ema(macd, n_sign, fillna)
if fillna:
macd_signal = macd_signal.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(macd_signal, name='MACD_sign')
def macd_diff(close, n_fast=12, n_slow=26, n_sign=9, fillna=False):
"""Moving Average Convergence Divergence (MACD Diff)
Shows the relationship between MACD and MACD Signal.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emafast = ema(close, n_fast, fillna)
emaslow = ema(close, n_slow, fillna)
macd = emafast - emaslow
macdsign = ema(macd, n_sign, fillna)
macd_diff = macd - macdsign
if fillna:
macd_diff = macd_diff.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(macd_diff, name='MACD_diff')
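# Illustrative sketch (not part of the original module): the three MACD helpers
# above are usually read together. The close series below is synthetic; `ema`
# comes from .utils and is used with the same signature as in the code above.
def _example_macd():
    close = pd.Series(np.sin(np.linspace(0, 6, 120)) * 5 + 100)
    line = macd(close, n_fast=12, n_slow=26)
    signal = macd_signal(close, n_fast=12, n_slow=26, n_sign=9)
    histogram = macd_diff(close, n_fast=12, n_slow=26, n_sign=9)
    return pd.concat([line, signal, histogram], axis=1)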
def ema_indicator(close, n=12, fillna=False):
"""EMA
Exponential Moving Average via Pandas
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(close, n, fillna)
return pd.Series(ema_, name='ema')
def adx(high, low, close, n=14, fillna=False):
"""Average Directional Movement Index (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to collectively
as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
cs = close.shift(1)
pdm = high.combine(cs, lambda x1, x2: max(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
pdn = low.combine(cs, lambda x1, x2: min(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
tr = pdm - pdn
trs_initial = np.zeros(n-1)
trs = np.zeros(len(close) - (n - 1))
trs[0] = tr.dropna()[0:n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(trs)-1):
trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]
up = high - high.shift(1)
dn = low.shift(1) - low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
dip_mio = np.zeros(len(close) - (n - 1))
dip_mio[0] = pos.dropna()[0:n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(dip_mio)-1):
dip_mio[i] = dip_mio[i-1] - (dip_mio[i-1]/float(n)) + pos[n+i]
din_mio = np.zeros(len(close) - (n - 1))
din_mio[0] = neg.dropna()[0:n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(din_mio)-1):
din_mio[i] = din_mio[i-1] - (din_mio[i-1]/float(n)) + neg[n+i]
dip = np.zeros(len(trs))
for i in range(len(trs)):
dip[i] = 100 * (dip_mio[i]/trs[i])
din = np.zeros(len(trs))
for i in range(len(trs)):
din[i] = 100 * (din_mio[i]/trs[i])
dx = 100 * np.abs((dip - din) / (dip + din))
adx = np.zeros(len(trs))
adx[n] = dx[0:n].mean()
for i in range(n+1, len(adx)):
adx[i] = ((adx[i-1] * (n - 1)) + dx[i-1]) / float(n)
adx = np.concatenate((trs_initial, adx), axis=0)
adx = pd.Series(data=adx, index=close.index)
if fillna:
adx = adx.replace([np.inf, -np.inf], np.nan).fillna(20)
return pd.Series(adx, name='adx')
def adx_pos(high, low, close, n=14, fillna=False):
"""Average Directional Movement Index Positive (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to collectively
as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
cs = close.shift(1)
pdm = high.combine(cs, lambda x1, x2: max(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
pdn = low.combine(cs, lambda x1, x2: min(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
tr = pdm - pdn
trs_initial = np.zeros(n-1)
trs = np.zeros(len(close) - (n - 1))
trs[0] = tr.dropna()[0:n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(trs)-1):
trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]
up = high - high.shift(1)
dn = low.shift(1) - low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
dip_mio = np.zeros(len(close) - (n - 1))
dip_mio[0] = pos.dropna()[0:n].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(dip_mio)-1):
dip_mio[i] = dip_mio[i-1] - (dip_mio[i-1]/float(n)) + pos[n+i]
dip = np.zeros(len(close))
for i in range(1, len(trs)-1):
dip[i+n] = 100 * (dip_mio[i]/trs[i])
dip = pd.Series(data=dip, index=close.index)
if fillna:
dip = dip.replace([np.inf, -np.inf], np.nan).fillna(20)
return pd.Series(dip, name='adx_pos')
def adx_neg(high, low, close, n=14, fillna=False):
"""Average Directional Movement Index Negative (ADX)
The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)
are derived from smoothed averages of these differences, and measure trend
direction over time. These two indicators are often referred to collectively
as the Directional Movement Indicator (DMI).
The Average Directional Index (ADX) is in turn derived from the smoothed
averages of the difference between +DI and -DI, and measures the strength
of the trend (regardless of direction) over time.
Using these three indicators together, chartists can determine both the
direction and strength of the trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
cs = close.shift(1)
pdm = high.combine(cs, lambda x1, x2: max(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
pdn = low.combine(cs, lambda x1, x2: min(x1, x2) if np.isnan(x1) == False and np.isnan(x2) == False else np.nan)
tr = pdm - pdn
trs_initial = np.zeros(n-1)
trs = np.zeros(len(close) - (n - 1))
trs[0] = tr.dropna()[0:n].sum()
tr = tr.reset_index(drop=True)
for i in range(1, len(trs)-1):
trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]
up = high - high.shift(1)
dn = low.shift(1) - low
pos = abs(((up > dn) & (up > 0)) * up)
neg = abs(((dn > up) & (dn > 0)) * dn)
din_mio = np.zeros(len(close) - (n - 1))
din_mio[0] = neg.dropna()[0:n].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(din_mio)-1):
din_mio[i] = din_mio[i-1] - (din_mio[i-1]/float(n)) + neg[n+i]
din = np.zeros(len(close))
for i in range(1, len(trs)-1):
din[i+n] = 100 * (din_mio[i]/float(trs[i]))
din = pd.Series(data=din, index=close.index)
if fillna:
din = din.replace([np.inf, -np.inf], np.nan).fillna(20)
return pd.Series(din, name='adx_neg')
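# Illustrative sketch: adx, adx_pos and adx_neg share the same OHLC inputs; the
# random walk below is synthetic demonstration data only.
def _example_adx():
    rng = np.random.RandomState(0)
    close = pd.Series(100 + rng.randn(80).cumsum())
    high = close + rng.rand(80)
    low = close - rng.rand(80)
    strength = adx(high, low, close, n=14)      # trend strength
    plus_di = adx_pos(high, low, close, n=14)   # +DI
    minus_di = adx_neg(high, low, close, n=14)  # -DI
    return pd.concat([strength, plus_di, minus_di], axis=1)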
def vortex_indicator_pos(high, low, close, n=14, fillna=False):
"""Vortex Indicator (VI)
It consists of two oscillators that capture positive and negative trend
movement. A bullish signal triggers when the positive trend indicator
crosses above the negative trend indicator or a key level.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
tr = high.combine(close.shift(1), max) - low.combine(close.shift(1), min)
trn = tr.rolling(n).sum()
vmp = np.abs(high - low.shift(1))
vmm = np.abs(low - high.shift(1))
vip = vmp.rolling(n).sum() / trn
if fillna:
vip = vip.replace([np.inf, -np.inf], np.nan).fillna(1)
return pd.Series(vip, name='vip')
def vortex_indicator_neg(high, low, close, n=14, fillna=False):
"""Vortex Indicator (VI)
It consists of two oscillators that capture positive and negative trend
movement. A bearish signal triggers when the negative trend indicator
crosses above the positive trend indicator or a key level.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
tr = high.combine(close.shift(1), max) - low.combine(close.shift(1), min)
trn = tr.rolling(n).sum()
vmp = np.abs(high - low.shift(1))
vmm = np.abs(low - high.shift(1))
vin = vmm.rolling(n).sum() / trn
if fillna:
vin = vin.replace([np.inf, -np.inf], np.nan).fillna(1)
return pd.Series(vin, name='vin')
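# Illustrative sketch: VI+ and VI- are read as a pair; a crossover of the
# positive indicator above the negative one is the bullish case described above.
def _example_vortex():
    rng = np.random.RandomState(1)
    close = pd.Series(50 + rng.randn(60).cumsum())
    high, low = close + 0.5, close - 0.5
    vip = vortex_indicator_pos(high, low, close, n=14)
    vin = vortex_indicator_neg(high, low, close, n=14)
    return vip > vin  # True where the positive trend indicator is on top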
def trix(close, n=15, fillna=False):
"""Trix (TRIX)
Shows the percent rate of change of a triple exponentially smoothed moving
average.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
ema1 = ema(close, n, fillna)
ema2 = ema(ema1, n, fillna)
ema3 = ema(ema2, n, fillna)
trix = (ema3 - ema3.shift(1)) / ema3.shift(1)
trix *= 100
if fillna:
trix = trix.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(trix, name='trix_'+str(n))
def mass_index(high, low, n=9, n2=25, fillna=False):
"""Mass Index (MI)
It uses the high-low range to identify trend reversals based on range
expansions. It identifies range bulges that can foreshadow a reversal of the
current trend.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:mass_index
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
n(int): n low period.
n2(int): n high period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
amplitude = high - low
ema1 = ema(amplitude, n, fillna)
ema2 = ema(ema1, n, fillna)
mass = ema1 / ema2
mass = mass.rolling(n2).sum()
if fillna:
mass = mass.replace([np.inf, -np.inf], np.nan).fillna(n2)
return pd.Series(mass, name='mass_index_'+str(n))
def cci(high, low, close, n=20, c=0.015, fillna=False):
"""Commodity Channel Index (CCI)
CCI measures the difference between a security's price change and its
average price change. High positive readings indicate that prices are well
above their average, which is a show of strength. Low negative readings
indicate that prices are well below their average, which is a show of
weakness.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
n(int): n period.
c(float): scaling constant (typically 0.015).
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
pp = (high + low + close) / 3.0
cci = (pp - pp.rolling(n).mean()) / (c * pp.rolling(n).std())
if fillna:
cci = cci.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(cci, name='cci')
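# Illustrative sketch: CCI of synthetic data. Readings far above zero mean the
# typical price is well above its n-period average; far below zero, the reverse.
def _example_cci():
    rng = np.random.RandomState(2)
    close = pd.Series(20 + rng.randn(60).cumsum())
    high, low = close + 1.0, close - 1.0
    return cci(high, low, close, n=20, c=0.015)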
def dpo(close, n=20, fillna=False):
"""Detrended Price Oscillator (DPO)
Is an indicator designed to remove trend from price and make it easier to
identify cycles.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:detrended_price_osci
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
dpo = close.shift(int((0.5 * n) + 1)) - close.rolling(n).mean()
if fillna:
dpo = dpo.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(dpo, name='dpo_'+str(n))
def kst(close, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, fillna=False):
"""KST Oscillator (KST)
It is useful to identify major stock market cycle junctures because its
formula is weighed to be more greatly influenced by the longer and more
dominant time spans, in order to better reflect the primary swings of stock
market cycle.
https://en.wikipedia.org/wiki/KST_oscillator
Args:
close(pandas.Series): dataset 'Close' column.
r1(int): r1 period.
r2(int): r2 period.
r3(int): r3 period.
r4(int): r4 period.
n1(int): n1 smoothed period.
n2(int): n2 smoothed period.
n3(int): n3 smoothed period.
n4(int): n4 smoothed period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
rocma1 = ((close - close.shift(r1)) / close.shift(r1)).rolling(n1).mean()
rocma2 = ((close - close.shift(r2)) / close.shift(r2)).rolling(n2).mean()
rocma3 = ((close - close.shift(r3)) / close.shift(r3)).rolling(n3).mean()
rocma4 = ((close - close.shift(r4)) / close.shift(r4)).rolling(n4).mean()
kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
if fillna:
kst = kst.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(kst, name='kst')
def kst_sig(close, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, nsig=9, fillna=False):
"""KST Oscillator (KST Signal)
It is useful to identify major stock market cycle junctures because its
formula is weighed to be more greatly influenced by the longer and more
dominant time spans, in order to better reflect the primary swings of stock
market cycle.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst
Args:
close(pandas.Series): dataset 'Close' column.
r1(int): r1 period.
r2(int): r2 period.
r3(int): r3 period.
r4(int): r4 period.
n1(int): n1 smoothed period.
n2(int): n2 smoothed period.
n3(int): n3 smoothed period.
n4(int): n4 smoothed period.
nsig(int): n period to signal.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
rocma1 = ((close - close.shift(r1)) / close.shift(r1)).rolling(n1).mean()
rocma2 = ((close - close.shift(r2)) / close.shift(r2)).rolling(n2).mean()
rocma3 = ((close - close.shift(r3)) / close.shift(r3)).rolling(n3).mean()
rocma4 = ((close - close.shift(r4)) / close.shift(r4)).rolling(n4).mean()
kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
kst_sig = kst.rolling(nsig).mean()
if fillna:
kst_sig = kst_sig.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(kst_sig, name='kst_sig')
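# Illustrative sketch: KST with its signal line; a cross of kst above kst_sig is
# the classic bullish reading described in the docstrings above.
def _example_kst():
    rng = np.random.RandomState(3)
    close = pd.Series(100 + rng.randn(200).cumsum())
    return pd.concat([kst(close), kst_sig(close)], axis=1)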
# -*- coding: utf-8 -*-
class DictionaryResult:
""" Main class of library """
def __init__(self, results):
self.results = results
def help(self):
print("""
[HELP] PicSureHpdsLib.Client(connection).useResource(uuid).dictionary().find(term)
.count() Returns the number of entries in the dictionary that match the given term
.keys() Return the keys of the matching entries
.entries() Return a list of matching dictionary entries
.DataFrame() Return the entries in a Pandas-compatible format
[Examples]
results = PicSureHpdsLib.Client(connection).useResource(uuid).dictionary().find("asthma")
df = results.DataFrame()
""")
def count(self):
return len(self.results['results'])
def keys(self):
return list(self.results['results'])
def entries(self):
ret = []
for key in self.results['results']:
ret.append(self.results['results'][key])
return ret
def DataFrame(self):
import pandas
ret = {}
# return an empty dataframe if there are no records
if len(self.results['results']) == 0:
return pandas.DataFrame(data=ret)
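# Usage sketch with a hand-built payload (illustrative; a real PIC-SURE response
# carries the same 'results' mapping but with richer entries).
if __name__ == '__main__':
    fake = {'results': {'\\demo\\asthma\\': {'name': 'asthma', 'categorical': True}}}
    res = DictionaryResult(fake)
    print(res.count())    # 1
    print(res.keys())     # ['\\demo\\asthma\\']
    print(res.entries())  # [{'name': 'asthma', 'categorical': True}]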
#!/usr/bin/env python3
import argparse
import sys
import numpy as np
import pandas as pd
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
class DecisionTree:
def __init__(
self,
max_depth=None,
min_to_split=2,
max_leaves=None,
criterion="gini",
depth=0,
leaves=0,
):
self.max_depth = max_depth
self.min_to_split = min_to_split
self.max_leaves = max_leaves
self.criterion = criterion
self.leaves = leaves
self.depth = depth
def _entropy(self, targets):
count = targets.shape[0]
value_counts = pd.Series(targets).value_counts(normalize=True)
return -count * np.sum(value_counts.values * np.log(value_counts.values))
def _gini(self, targets):
count = targets.shape[0]
value_counts = pd.Series(targets).value_counts(normalize=True)
return count * np.sum(value_counts.values * (1 - value_counts.values))
def _evaluate(self, targets):
if self.criterion == "gini":
return self._gini(targets)
if self.criterion == "entropy":
return self._entropy(targets)
return None
def get_split_points(self, feature):
feature = np.sort(np.unique(feature))
if feature.shape[0] <= 1:
return None
split_points = np.mean(list(zip(feature[:-1], feature[1:])), axis=1)
return split_points
def get_best_split_point(self, feature, targets):
split_points = self.get_split_points(feature)
if split_points is None:
return None, -np.inf
split_scores = []
for split_point in split_points:
left_targets = targets[feature <= split_point]
right_targets = targets[feature >= split_point]
score_left = self._evaluate(left_targets)
score_right = self._evaluate(right_targets)
score_split = score_left + score_right - self.score
split_scores.append(score_split)
best_split_index = np.argmin(split_scores)
return split_points[best_split_index], split_scores[best_split_index]
def split(self, features, targets):
self.score = self._evaluate(targets)
if (self.min_to_split is not None) and (targets.shape[0] < self.min_to_split):
self.is_leaf = True
self.class_proba = pd.Series(targets)
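# Worked example (illustrative) of the impurity scores used above. For targets
# [0, 0, 1, 1] both classes have probability 0.5, so:
#   gini    = 4 * (0.5*(1-0.5) + 0.5*(1-0.5)) = 2.0
#   entropy = -4 * (0.5*log(0.5) + 0.5*log(0.5)) = 4*log(2) ~ 2.7726
def _impurity_demo():
    gini_score = DecisionTree(criterion="gini")._gini(np.array([0, 0, 1, 1]))
    entropy_score = DecisionTree(criterion="entropy")._entropy(np.array([0, 0, 1, 1]))
    return gini_score, entropy_score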
import pandas as pd
import numpy as np
# Loading libraries for modeling
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix, recall_score
import time
import pickle
import argparse
import sys
import os
import cProfile, pstats, io
#warnings.filterwarnings("ignore")
'''USAGE
python3 src/modules/modelling/model.py \
--input_file='output/preprocessed_data/preprocessed_sentences.tsv' \
--eda_file='yes'
'''
in_file_name = r'output/preprocessed_data/preprocessed_sentences.tsv'
out_file = r'output/predictions/predictions_train.tsv'
eda_file = r'output/eda/probabilities_percentiles_df.tsv'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, default=in_file_name,
help='Directory where your input data is.')
parser.add_argument('--output_file', type=str, default=out_file,
help='File where the predictions are written.')
parser.add_argument('--eda_file', type=str, default='no',
help='Do EDA files yes/no.')
args = parser.parse_args()
X_train, y_train, dataframe = prepare_data(file = args.input_file)
print("Using CountVectorizer to preprocess data...")
# Start training
clf = train(X_train, y_train)
print("Training model...")
y_pred, y_proba = predictions(X_train, y_train)
print("Completed metrics")
guessed_label = pd.DataFrame(y_pred)
actual_label = | pd.DataFrame(y_train) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pandas._testing as tm
import ibis
def test_map_length_expr(t):
expr = t.map_of_integers_strings.length()
result = expr.execute()
expected = pd.Series([0, None, 2], name='map_of_integers_strings')
tm.assert_series_equal(result, expected)
def test_map_value_for_key_expr(t):
expr = t.map_of_integers_strings[1]
result = expr.execute()
expected = pd.Series([None, None, 'a'], name='map_of_integers_strings')
tm.assert_series_equal(result, expected)
def test_map_value_or_default_for_key_expr(t):
expr = t.map_of_complex_values.get('a')
result = expr.execute()
expected = pd.Series(
[None, [1, 2, 3], None], dtype='object', name='map_of_complex_values'
)
tm.assert_series_equal(result, expected)
def safe_sorter(element):
return np.sort(element) if isinstance(element, np.ndarray) else element
def test_map_keys_expr(t):
expr = t.map_of_strings_integers.keys()
result = expr.execute().map(safe_sorter)
expected = pd.Series(
np.array([['a', 'b'], None, []], dtype='object'),
dtype='object',
name='map_of_strings_integers',
)
tm.assert_series_equal(result, expected)
def test_map_keys_scalar(client, t):
expr = ibis.literal({'a': 10, 'b': 50, 'c': 20, 'd': 40})
expr = expr.keys()
result = client.execute(expr)
expected = np.array(['a', 'b', 'c', 'd'])
tm.assert_numpy_array_equal(result, expected)
def test_map_values_expr(t):
expr = t.map_of_complex_values.values()
result = expr.execute().map(safe_sorter)
expected = pd.Series(
[None, np.array([[], [1, 2, 3]], dtype='object'), np.array([])],
dtype='object',
name='map_of_complex_values',
)
tm.assert_series_equal(result, expected)
def test_map_values_scalar(client, t):
expr = ibis.literal({'a': 10, 'b': 50, 'c': 20, 'd': 40})
expr = expr.values()
result = client.execute(expr)
expected = np.array([10, 50, 20, 40])
tm.assert_numpy_array_equal(result, expected)
def test_map_concat_expr(t):
expr = t.map_of_complex_values + {'b': [4, 5, 6], 'c': [], 'a': []}
result = expr.execute()
expected = pd.Series(
[
None,
{'a': [], 'b': [4, 5, 6], 'c': []},
{'b': [4, 5, 6], 'c': [], 'a': []},
],
dtype='object',
name='map_of_complex_values',
)
tm.assert_series_equal(result, expected)
def test_map_value_for_key_literal_broadcast(t):
lookup_table = ibis.literal({'a': 1, 'b': 2, 'c': 3, 'd': 4})
expr = lookup_table.get(t.dup_strings)
result = expr.execute()
expected = pd.Series([4, 1, 4], name='dup_strings')
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
import unittest
import nose
import numpy as np
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
assert(type(model1) == type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest
def testOLSWithDatasets(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
self.checkDataSet(sm.datasets.copper.load())
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s
def testWLS(self):
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1/weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1/aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start : end]
endog = dataset.endog[start : end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(unittest.TestCase):
'''
For test coverage with faux data
'''
@classmethod
def setupClass(cls):
if not _have_statsmodels:
raise nose.SkipTest
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assert_(model1.r2 != model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assert_((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns = model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760","1999": "5904","2000": "4504",
"2001": "9808","2002": "4241","2003": "4086",
"2004": "4687","2005": "7686","2006": "3740",
"2007": "3075","2008": "3753","2009": "4679",
"2010": "5468","2011": "7154","2012": "4292",
"2013": "4283","2014": "4595","2015": "9194",
"2016": "4221","2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assert_(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assert_(notnull(model.beta.values).all())
self.assert_(isinstance(model, PanelOLS))
model.summary
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
model = ols(y=y, x=x)
expected = ols(y=y, x={'x' : x})
assert_series_equal(model.beta, expected.beta)
def test_various_attributes(self):
# just make sure everything "works". test correctness elsewhere
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
for attr in series_attrs:
value = getattr(model, attr)
self.assert_(isinstance(value, Series))
# works
model._results
def test_catch_regressor_overlap(self):
df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]
df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]
y = tm.makeTimeSeries()
data = {'foo' : df1, 'bar' : df2}
self.assertRaises(Exception, ols, y=y, x=data)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, intercept=False)
model.summary
model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
rmodel = ols(y=y, x=x, window=10)
model = ols(y=y, x=x)
model.resid
rmodel.resid
def test_plm_lagged_y_predict(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
hyp = ['1*a=0',
'1*b=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
def test_plm_exclude_dummy_corner(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, entity_effects=True, dropped_dummies={'entity' : 'D'})
model.summary
self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,
dropped_dummies={'entity' : 'E'})
def test_columns_tuples_summary(self):
# #1837
X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')])
Y = Series(np.random.randn(10))
# it works!
model = ols(y=Y, x=X)
model.summary
class TestPanelOLS(BaseTest):
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',
'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',
't_stat', 'var_beta']
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
        self.assertTrue(exp_index.equals(index))
index = x.index.get_level_values(1)
index = Index(sorted(set(index)))
exp_index = Index(['A', 'B'])
self.assertTrue(exp_index.equals(index))
x = result._x_filtered
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1),
datetime(2000, 1, 3),
datetime(2000, 1, 4)])
self.assertTrue(exp_index.equals(index))
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1]]
assert_almost_equal(exp_x, result._x.values)
exp_x_filtered = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1],
[11, 20, 1],
[12, 21, 1]]
assert_almost_equal(exp_x_filtered, result._x_filtered.values)
self.assertTrue(result._x_filtered.index.levels[0].equals(
result.y_fitted.index))
def test_wls_panel(self):
y = tm.makeTimeDataFrame()
x = Panel({'x1' : tm.makeTimeDataFrame(),
'x2' : tm.makeTimeDataFrame()})
y.ix[[1, 7], 'A'] = np.nan
y.ix[[6, 15], 'B'] = np.nan
y.ix[[3, 20], 'C'] = np.nan
y.ix[[5, 11], 'D'] = np.nan
stack_y = y.stack()
stack_x = DataFrame(dict((k, v.stack())
for k, v in x.iterkv()))
weights = x.std('items')
stack_weights = weights.stack()
stack_y.index = stack_y.index._tuple_index
stack_x.index = stack_x.index._tuple_index
stack_weights.index = stack_weights.index._tuple_index
result = ols(y=y, x=x, weights=1/weights)
expected = ols(y=stack_y, x=stack_x, weights=1/stack_weights)
assert_almost_equal(result.beta, expected.beta)
for attr in ['resid', 'y_fitted']:
rvals = getattr(result, attr).stack().values
evals = getattr(expected, attr).values
assert_almost_equal(rvals, evals)
def testWithTimeEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])
exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]]
assert_almost_equal(result._x_trans.values, exp_x)
# _check_non_raw_results(result)
def testWithEntityEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[0, 6, 14, 1], [0, 9, 17, 1], [1, 30, 48, 1]],
index=result._x.index, columns=['FE_B', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithEntityEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
dropped_dummies={'entity' : 'B'})
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1, 6, 14, 1], [1, 9, 17, 1], [0, 30, 48, 1]],
index=result._x.index, columns=['FE_A', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithXEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
assert_almost_equal(result._y.values.flat, [1, 4, 5])
res = result._x
exp_x = DataFrame([[0, 0, 14, 1], [0, 1, 17, 1], [1, 0, 48, 1]],
columns=['x1_30', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
dropped_dummies={'x1' : 30})
res = result._x
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1, 0, 14, 1], [0, 1, 17, 1], [0, 0, 48, 1]],
columns=['x1_6', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndConversion(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],
[0, 0, 0, 1, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_baz', 'x2_foo', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testWithXEffectsAndConversionAndDroppedDummies(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
dropped_dummies={'x2' : 'foo'})
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],
[0, 0, 0, 0, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_bar', 'x2_baz', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testForSeries(self):
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=0)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=1,
nw_overlap=True)
def testRolling(self):
self.checkMovingOLS(self.panel_x, self.panel_y)
def testRollingWithFixedEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
entity_effects=True)
self.checkMovingOLS(self.panel_x, self.panel_y, intercept=False,
entity_effects=True)
def testRollingWithTimeEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True)
def testRollingWithNeweyWest(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1)
def testRollingWithEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='entity')
def testRollingWithTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True, cluster='entity')
def testRollingWithTimeCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='time')
def testRollingWithNeweyWestAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1, cluster='entity')
def testRollingWithNeweyWestAndTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1, cluster='entity',
time_effects=True)
def testExpanding(self):
self.checkMovingOLS(self.panel_x, self.panel_y, window_type='expanding')
def testNonPooled(self):
self.checkNonPooled(y=self.panel_y, x=self.panel_x)
self.checkNonPooled(y=self.panel_y, x=self.panel_x,
window_type='rolling', window=25, min_periods=10)
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
result = ols(y=y, x=x, pool=False, **kwds)
_check_repr(result)
for attr in NonPooledPanelOLS.ATTRIBUTES:
_check_repr(getattr(result, attr))
def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
window = 25 # must be larger than rank of x
moving = ols(y=y, x=x, window_type=window_type,
window=window, **kwds)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
def checkForSeries(self, x, y, series_x, series_y, **kwds):
# Consistency check with simple OLS.
result = ols(y=y, x=x, **kwds)
reference = ols(y=series_y, x=series_x, **kwds)
self.compare(reference, result)
def compare(self, static, moving, event_index=None,
result_index=None):
# Check resid if we have a time index specified
if event_index is not None:
staticSlice = _period_slice(static, -1)
movingSlice = _period_slice(moving, event_index)
ref = static._resid_raw[staticSlice]
res = moving._resid_raw[movingSlice]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[staticSlice]
res = moving._y_fitted_raw[movingSlice]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_auto_rolling_window_type(self):
data = tm.makeTimeDataFrame()
y = data.pop('A')
        window_model = ols(y=y, x=data, window=20, min_periods=10)
import datetime as dt
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
import seaice.nasateam as nt
import seaice.tools.plotter.daily_extent as de
class Test_BoundingDateRange(unittest.TestCase):
def test_standard(self):
today = dt.date(2015, 9, 22)
month_bounds = (-3, 1)
expected_bounds = (dt.date(2015, 6, 1), dt.date(2015, 10, 31))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_year(self):
today = dt.date(2001, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2000, 12, 1), dt.date(2001, 2, 28))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_leap_year(self):
today = dt.date(2016, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2015, 12, 1), dt.date(2016, 2, 29))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
class Test_GetRecordYear(unittest.TestCase):
start_date = nt.BEGINNING_OF_SATELLITE_ERA
end_date = dt.date(2015, 12, 31)
date_index = pd.date_range(start_date, end_date)
base_series = pd.Series(index=date_index).fillna(5)
def _series(self, low=None, high=None, next_highest=None, next_lowest=None):
"""Return a series for easily testing record values. All the values are 5, with
different values set to the dates passed in as low, next_lowest, high,
and next_highest. The index of the returned series is from the beginning
of the satellite era to the end of 2015 (since that happens to be the
last complete year at the time of this writing).
"""
series = self.base_series.copy()
if high:
series[high] = 10
if next_highest:
series[next_highest] = 7
if next_lowest:
series[next_lowest] = 2
if low:
series[low] = 0
return series
def test_max(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002 , recordline:2002"""
series = self._series(high='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002(min) , recordline:2002"""
series = self._series(low='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014, recordline:2010"""
series = self._series(high='2014-03-15', next_highest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014(min), recordline:2010"""
series = self._series(low='2014-03-15', next_lowest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_is_included_in_month_bounds(self):
"""Date: 2/2015, range: 10/2014 -> 3/2015, record: 1/2014, recordline: 2013-2014"""
series = self._series(low='2014-04-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-02-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_before_and_crossover_forward(self):
"""Date: 12/2015, range: 8/2015 -> 1/2016, record: 12/2014, recordline: 2014-2015"""
series = self._series(low='2014-09-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-12-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004, recordline:2004"""
series = self._series(high='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004(min), recordline:2003-2004"""
series = self._series(low='2004-01-27')
        date = pd.to_datetime('2010-01-15')
# vim: fdm=indent
# author: <NAME>
# date: 16/08/17
# content: Dataset functions to reduce dimensionality of gene expression
# and phenotypes.
# Modules
import numpy as np
import pandas as pd
from .plugins import Plugin
from ..utils.cache import method_caches
from ..counts_table.counts_table import CountsTable
from ..samplesheet import SampleSheet
from ..featuresheet import FeatureSheet
# Classes / functions
class DimensionalityReduction(Plugin):
'''Reduce dimensionality of gene expression and phenotype'''
@method_caches
def pca(self,
n_dims=2,
transform='log10',
robust=False,
random_state=None,
return_dataset=False):
'''Principal component analysis
Args:
n_dims (int): Number of dimensions (2+).
transform (string or None): Whether to preprocess the data.
robust (bool): Whether to use Principal Component Pursuit to
exclude outliers.
random_state (int): seed for the random number generator
return_dataset (False or 'samples' or 'features'): if 'samples',
return a Dataset with the PCs as features and the samples as
samples. If 'features', return a Dataset with the PCs as samples
and the features as features.
Returns:
if return_dataset is False, dict of the left eigenvectors (vs),
right eigenvectors (us) of the singular value decomposition,
eigenvalues (lambdas), the transform, and the whiten function (for
plotting). Else, a Dataset as described above.
NOTE: return_dataset='samples' is useful for subsequent sample
clustering via the Dataset.cluster plugin, as PCA space with around 20
dimensions is often a good compromise between information richness and
low noise.
'''
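        # Usage sketch (the plugin construction/wiring below is assumed, not taken
        # from this file; `dataset` is a singlet Dataset exposing `.counts`):
        #   dr = DimensionalityReduction(dataset)
        #   pcs = dr.pca(n_dims=20, transform='log10', return_dataset='samples')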
from sklearn.decomposition import PCA
X = self.dataset.counts.copy()
pco = self.dataset.counts.pseudocount
if transform == 'log10':
X = np.log10(X + pco)
elif transform == 'log2':
X = np.log2(X + pco)
elif transform == 'log':
X = np.log(X + pco)
whiten = lambda x: ((x.T - X.mean(axis=1)) / X.std(axis=1, ddof=0)).T
Xnorm = whiten(X)
        # Set NaNs to zero (e.g. features that do not vary, i.e. dropouts)
Xnorm[np.isnan(Xnorm)] = 0
if robust:
#from numpy.linalg import matrix_rank
#rank = matrix_rank(Xnorm.values)
            # Principal Component Pursuit (PCP)
rpca = _RPCA(Xnorm.values)
# L is low-rank, S is sparse (outliers)
L, S = rpca.fit(max_iter=1000, iter_print=None)
            L = pd.DataFrame(L, index=X.index, columns=X.columns)
import json
import pandas as pd
import geopandas as gp
import requests
from shapely.geometry import Point
def create_folder(path):
"""Create empty directory if outpath does not already exist."""
path.parent.mkdir(parents=True, exist_ok=True)
def get_raw_data(query):
"""Get raw text data of pumps from Overpass API."""
response = requests.get(query)
return response
def write_df_to_json(cleaned_gdf, outpath):
"""Save resulting geodataframe to a .json-file in outpath directory."""
cleaned_gdf.to_file(outpath, driver="GeoJSON")
geojson = cleaned_gdf.to_json(na="null")
minified = open(outpath + ".min.json", "w+")
minified.write(json.dumps(json.loads(geojson), separators=(",", ":")))
minified.close()
print("::set-output name=file::" + outpath)
def get_overpass_gdf(json):
"""Create dataframe
Args:
json (json): Results from OSM API as json
Returns:
df (dataframe): Results from OSM API request as geodataframe with coordinates
"""
df = pd.DataFrame(json["elements"])
df = df.dropna(subset=['lon', 'lat'])
df["geometry"] = [Point(xy) for xy in zip(df.lon, df.lat)]
df = gp.GeoDataFrame(df, geometry="geometry")
return df
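# Example pipeline (sketch; the Overpass query string is a placeholder, not the
# project's actual query):
#   response = get_raw_data("https://overpass-api.de/api/interpreter?data=[out:json];...")
#   gdf = get_overpass_gdf(response.json())
#   cleaned = transform_dataframe(gdf)      # defined below
#   write_df_to_json(cleaned, "pumps.geojson")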
def transform_dataframe(gdf):
"""Takes geo data frame and cleans out unused values and does a reclassification.
Args:
gdf (GeoDataFrame): GeoDataFrame created by method get_overpass_gdf
Returns:
cleaned_gdf (GeoDataFrame): Contains only data to the pumps we actually need
"""
gdf = pd.concat([gdf.drop(["tags"], axis=1), gdf["tags"].apply(pd.Series)], axis=1)
    # keep only required tags
cleaned_gdf = gdf.filter(["id", "addr:full", "image", "pump:style", "check_date","pump:status", "geometry"])
# list of dropped columns
# "lat",
# "lon",
# "type",
# "description",
# "emergency",
# "man_made",
# "pump",
# "pump:type",
# "ref",
# "water_well",
# "playground",
# "addr:city",
# "addr:postcode",
# "fixme",
# "name",
# "website",
# "colour",
# "wheelchair",
# "tourism",
# "addr:housenumber",
# "wikipedia",
# "alt_ref",
# "note",
# "addr:street",
# "heritage:website",
# "lda:criteria",
# "depth",
# "access",
# "historic",
# "mapillary",
# "drinking_water:legal",
# "operator",
# "official_ref",
# "ref:lda",
# "heritage",
# "artist_name",
# "heritage:operator",
# "drinking_water",
# "start_date",
# "survey:date",
# "pump:style:Lauchhammer"
# ]
    # TODO: [GDK-16] Notify when this happens, since it would mean that the OSM output changed
if "check_date" not in cleaned_gdf:
cleaned_gdf["check_date"] = | pd.Series(dtype=str) | pandas.Series |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.tests.extension.base import BaseOpsUtil
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
# msg = r"can only perform ops with 1-d structures"
msg = "Lengths must match"
with pytest.raises(ValueError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
        tm.assert_extension_array_equal(result, expected)
# bca4abm
# See full license in LICENSE.txt.
from builtins import range
import logging
import os.path
import numpy as np
import pandas as pd
import itertools
from activitysim.core import inject
from activitysim.core import config
from activitysim.core import tracing
from bca4abm import bca4abm as bca
logger = logging.getLogger(__name__)
def read_csv_file(data_dir, file_name):
fpath = os.path.join(data_dir, file_name)
df = bca.read_csv_or_tsv(fpath, header=0, comment='#')
return df
def read_zone_indexed_csv_file(data_dir, file_name, zone_aliases, zone_ids_index):
fpath = os.path.join(data_dir, file_name)
logger.info("read zone indexed csv file: " + fpath)
df = bca.read_csv_or_tsv(fpath, header=0, comment='#').rename(columns=zone_aliases)
if zone_ids_index is not None:
# check that any and all zone id columns match zone_ids_index
if 'zone' in df:
# if there is a zone column, it should match zone_ids_index
assert (zone_ids_index == df.zone.values).all()
df.set_index('zone', drop=True, inplace=True)
else:
df.index = zone_ids_index
else:
# the default index is zero-based, so we can convert to 1-based zone ids simply by adding 1
df.index = df.index + 1
if 'zone' in df:
assert (df.index.values == list(range(1, len(df) + 1))).all()
df.index.name = 'ZONE'
return df
def read_and_concat_zone_indexed_csv_files(data_dir, file_names, zone_aliases, zone_ids_index):
"""
Parameters
----------
data_dir
file_names
zone_aliases
zone_ids_index
Returns
-------
omnibus_df: pandas df
df with all the columns from file list (except zone ids)
with index named 'zone' if zone_ids_index supplied
otherwise, zero-based default index
"""
omnibus_df = None
for file_name in file_names:
df = read_zone_indexed_csv_file(data_dir, file_name, zone_aliases, zone_ids_index)
if omnibus_df is None:
omnibus_df = df
else:
omnibus_df = pd.concat([omnibus_df, df], axis=1)
return omnibus_df
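# Example (sketch; file names and the column alias are hypothetical):
#   omnibus = read_and_concat_zone_indexed_csv_files(
#       'base-data', ['hh.csv', 'ext_cocs.csv'],
#       zone_aliases={'taz': 'zone'}, zone_ids_index=None)
#   # -> one wide DataFrame indexed by 1-based ZONE ids, with columns from both files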
def check_zone_index(df, zone_ids):
if zone_ids is not None:
expected_index = zone_ids.values
else:
expected_index = list(range(1, len(df) + 1))
assert (df.index.values == expected_index).all()
assert df.index.name == 'ZONE'
@inject.table()
def zone_districts(data_dir, zone_aliases, zone_ids):
table_settings = config.read_model_settings('tables.yaml')
file_name = table_settings.get('district_file_name')
districts_df = read_zone_indexed_csv_file(data_dir, file_name, zone_aliases, zone_ids)
assert 'district' in districts_df
check_zone_index(districts_df, zone_ids)
tracing.write_csv(districts_df,
file_name='zone_districts',
transpose=False)
return districts_df
@inject.table()
def zone_hhs(data_dir, zone_aliases, zone_ids):
logger.debug("reading zone_hhs table")
table_settings = config.read_model_settings('tables.yaml')
file_name = table_settings.get('hh_file_name')
base_data_dir = os.path.join(data_dir, 'base-data')
build_data_dir = os.path.join(data_dir, 'build-data')
base_hhs_df = read_zone_indexed_csv_file(
base_data_dir, file_name,
zone_aliases, zone_ids)
build_hhs_df = read_zone_indexed_csv_file(
build_data_dir, file_name,
zone_aliases, zone_ids)
cocs_file_name = table_settings.get('ext_cocs_file_name')
base_cocs_df = read_zone_indexed_csv_file(
base_data_dir, cocs_file_name,
zone_aliases, zone_ids)
    base_hhs_df = pd.concat([base_hhs_df, base_cocs_df], axis=1)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
            109: pd.Timestamp("2012-08-19 00:00:00"),
import logging
logging.basicConfig(level=logging.WARNING)
import pytest
import numpy
import os
import pypipegraph as ppg
import pandas as pd
from pathlib import Path
from pandas.testing import assert_frame_equal
import dppd
import dppd_plotnine # noqa:F401
from mbf_qualitycontrol.testing import assert_image_equal
from mbf_sampledata import get_sample_data
import mbf_genomics.regions as regions
from mbf_genomics.annotator import Constant, Annotator
from .shared import (
get_genome,
get_genome_chr_length,
force_load,
inside_ppg,
run_pipegraph,
RaisesDirectOrInsidePipegraph,
MockGenome,
)
dp, X = dppd.dppd()
@pytest.mark.usefixtures("new_pipegraph")
class TestGenomicRegionsLoadingPPGOnly:
def test_dependency_passing(self):
job = ppg.ParameterInvariant("sha", (None,))
a = regions.GenomicRegions("shu", lambda: None, [job], get_genome())
load_job = a.load()
assert job in load_job.lfg.prerequisites
def test_dependency_may_be_iterable_instead_of_list(self):
job = ppg.ParameterInvariant("shu", (None,))
a = regions.GenomicRegions("shu", lambda: None, (job,), get_genome())
load_job = a.load()
assert job in load_job.lfg.prerequisites
def test_depenencies_must_be_jobs(self):
ppg.ParameterInvariant("shu", (None,))
with pytest.raises(ValueError):
regions.GenomicRegions("shu", lambda: None, ["shu"], get_genome())
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
class TestGenomicRegionsLoading:
def test_raises_on_duplicate_name(self, both_ppg_and_no_ppg):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
regions.GenomicRegions("shu", sample_data, [], get_genome())
if inside_ppg():
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, [], get_genome())
both_ppg_and_no_ppg.new_pipegraph()
regions.GenomicRegions(
"shu", sample_data, [], get_genome()
) # should not raise
def test_raises_on_non_iterable_dependencies(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, "aaeu", get_genome())
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, 1, get_genome())
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, iter([]), get_genome())
def test_loading(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
if inside_ppg():
assert not hasattr(a, "df")
force_load(a.load())
else:
assert hasattr(a, "df")
run_pipegraph()
assert hasattr(a, "df")
assert len(a.df) == 1
assert "chr" in a.df.columns
assert "start" in a.df.columns
assert "stop" in a.df.columns
def test_filtering_copy_anno(self, clear_annotators):
import mbf_genomics
def sample_data():
return pd.DataFrame(
{
"chr": "Chromosome",
"start": [1000, 1001, 1002],
"stop": [1100, 1101, 1102],
}
)
a = regions.GenomicRegions(
"sha", sample_data, [], get_genome(), on_overlap="ignore"
)
b = a.filter("filtered", ("start", "==", 1001))
class CopyAnno(mbf_genomics.annotator.Annotator):
def __init__(self):
self.columns = ["copy"]
def calc(self, df):
return pd.DataFrame({"copy": df["start"]})
a += CopyAnno()
if inside_ppg():
assert not hasattr(a, "df")
force_load(a.load())
force_load(b.annotate())
else:
assert hasattr(a, "df")
run_pipegraph()
print(b.df)
assert (b.df["start"] == [1001]).all()
assert (b.df["copy"] == [1001]).all()
def test_raises_on_invalid_on_overlap(self):
def inner():
regions.GenomicRegions(
"shu",
lambda: None,
[],
get_genome(),
on_overlap="run in circles all about",
)
with pytest.raises(ValueError):
inner()
def test_magic(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
a = regions.GenomicRegions("shu", sample_data, [], get_genome())
hash(a)
str(a)
repr(a)
bool(a)
a.load()
run_pipegraph()
with pytest.raises(TypeError):
iter(a)
def test_loading_missing_start(self):
def sample_data():
return pd.DataFrame({"chr": "1", "stop": [1100]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_missing_chr(self):
def sample_data():
return pd.DataFrame({"start": [1000], "stop": [1100]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_missing_stop(self):
def sample_data():
return pd.DataFrame({"chr": "Chromosome", "start": [1200]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_invalid_chromosome(self):
def sample_data():
            return pd.DataFrame({"chr": ["1b"], "start": [1200], "stop": [1232]})
import pandas as pd
import numpy as np
import h5py
"""
This module loads h5 files created by a DSM2 hydro or qual run.
All the input, geometry and data tables are available as pandas DataFrame objects
In addition there are convenience methods for retrieving the data tables as
DataFrame that represent time series. The date range is based on attributes in
the tables, namely start time and time interval.
"""
_path_structure_map = {"hydro": ['/hydro/input', '/hydro/data', '/hydro/geometry'],
"qual": ["input", "output"]}
"""
hardwired lists of data available for modeltype. Currently
the model type is either 'hydro' or 'qual'
"""
_MODEL_TO_DATA_PATHS_MAP = {
'hydro':
['/hydro/data/'+s
for s in ['channel area',
'channel avg area', 'channel flow',
'channel stage', 'qext flow', 'reservoir flow',
'reservoir height', 'transfer flow']],
'qual':
['/output/'+s
for s in ['channel avg concentration',
'channel concentration',
'reservoir concentration']]
}
def get_model(filename):
"""
returns one of "hydro" or "qual"
"""
with h5py.File(filename, 'r') as f:
if f.get('/hydro'):
return "hydro"
else:
return "qual"
def get_datapaths(modeltype="hydro"):
return _MODEL_TO_DATA_PATHS_MAP[modeltype]
def list_table_paths(filename):
"""
returns a list of paths to tables (h5py.DataSets)
"""
table_paths = []
with h5py.File(filename, 'r') as f:
        # in case both hydro and qual output to the same file (some overwriting possible?)
group_paths = ['/hydro/input', '/hydro/data',
'/hydro/geometry', 'input', 'output']
for path in group_paths:
g = f.get(path)
if not g:
continue
for key in g.keys():
table_paths.append(path+'/'+key)
return table_paths
def list_groups_as_df(filename, group_path):
''' reads listing of group path as pd.DataFrame '''
with h5py.File(filename, 'r') as f:
return pd.DataFrame(f[group_path])
def read_table_attr(filename, table_path):
'''
returns a dictionary of attribute names to values
'''
return df_to_dict(_read_table_attrs_as_df(filename,table_path))
def _read_table_attrs_as_df(filename, table_path):
'''
reads a tables attribute as a pandas.DataFrame
returns a data frame of attributes as Name and Value columns
'''
with h5py.File(filename, 'r') as f:
bf = f[table_path]
        a = pd.DataFrame(list(bf.attrs.items()), columns=[
            'Name', 'Value'], dtype=str)
        a = pd.concat([a, pd.DataFrame([('shape', str(bf.shape))], columns=[
            'Name', 'Value'])], ignore_index=True)
return a
def df_to_dict(df, key_column='Name', value_column='Value'):
'''
creates a dictionary based on a key_column (default Name) mapped to a value_column (default Value)
'''
return dict(zip(df[key_column], df[value_column]))
def _convert_time_to_table_slice(start_time, end_time, interval, table_start_time, table_time_length):
'''
start_time and end_time as convertable to to_datetime
interval as convertable to Timedelta
table_start_time convertable to_datetime
table_time_length int
'''
st = pd.to_datetime(start_time)
et = pd.to_datetime(end_time)
table_start_time = pd.to_datetime(table_start_time)
interval = pd.Timedelta(interval)
if et < st:
raise "Start time: "+st+" is ahead of end time: "+et
table_end_time = table_start_time+interval*table_time_length
if st < table_start_time:
st = table_start_time
if et > table_end_time:
et = table_end_time
start_index = int((st-table_start_time)/interval)
end_index = int((et-table_start_time)/interval)
return slice(start_index, end_index, 1)
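# Worked example (added; values are illustrative): with a table starting at
# '2020-01-01 00:00', a '15min' interval and 96 rows,
#   _convert_time_to_table_slice('2020-01-01 01:00', '2020-01-01 02:00',
#                                '15min', '2020-01-01 00:00', 96)
# returns slice(4, 8, 1), i.e. 1 hour / 15 min = 4 intervals from the table start.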
def df_column_values_to_index(df, column_label, matching_values):
'''
returns in the index values for the column_label for the matching_values
in the DataFrame of that column
'''
return df[df[column_label].isin(matching_values)].index.values
def read_table_as_df(filename, table_path, sliver=slice(None)):
'''
    reads table as a pandas.DataFrame
if slice is specified, only that slice of the table is read, default is slice(None)
returns a data frame with forcing datatype to string
'''
with h5py.File(filename, 'r') as f:
bf = f[table_path][sliver]
x = | pd.DataFrame(bf, dtype=np.str) | pandas.DataFrame |
import re
import unicodedata
from collections import Counter
from itertools import product
import pickle
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
import umap
from src import sentence_splitter
def get_umap(train, test, size=2):
um = umap.UMAP(transform_seed=1, random_state=1, n_neighbors=size)
um.fit(train.values)
tr_em = um.transform(train.values)
te_em = um.transform(test.values)
return tr_em, te_em
def LE(train, test):
for col in train.columns:
if train[col].dtypes == object:
train[col].fillna("null")
test[col].fillna("null")
lbl = LabelEncoder()
lbl.fit(list(train[col].values) + list(test[col].values))
train[col] = lbl.transform(list(train[col].values))
test[col] = lbl.transform(list(test[col].values))
# count encoding
def CE(train, test, cols, all_df):
for col in cols:
# all_df = pd.concat([train.drop(["y"], axis=1), test], ignore_index=True).reset_index()
train[col + "_count"] = train[col].map(all_df[col].value_counts())
test[col + "_count"] = test[col].map(all_df[col].value_counts())
# target encoding
def TE(train, test, func, target, cols):
funcs = ["max", "min", "mean", "std"]
for col in cols:
data_tmp = pd.DataFrame({col: train[col], "target": target})
target_dic = data_tmp.groupby(col)["target"].aggregate(func)
test[col + "_TE_" + func] = test[col].map(target_dic)
tmp = np.repeat(np.nan, train.shape[0])
        # split the training data
kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=22)
for idx_1, idx_2 in kf.split(train, train[col]):
target_dic = data_tmp.iloc[idx_1].groupby(col)["target"].aggregate(func)
tmp[idx_2] = train[col].iloc[idx_2].map(target_dic)
train[col + "_TE_" + func] = tmp
def group(train, test, col, target, all_df):
mean_map = all_df.groupby(col)[target].mean()
train["group_" + col + "_mean_" + target] = train[col].map(mean_map)
test["group_" + col + "_mean_" + target] = test[col].map(mean_map)
std_map = all_df.groupby(col)[target].std()
train["group_" + col + "_std_" + target] = train[col].map(std_map)
test["group_" + col + "_std_" + target] = test[col].map(std_map)
sum_map = all_df.groupby(col)[target].sum()
train["group_" + col + "_sum_" + target] = train[col].map(sum_map)
test["group_" + col + "_sum_" + target] = test[col].map(sum_map)
min_map = all_df.groupby(col)[target].min()
train["group_" + col + "_min_" + target] = train[col].map(min_map)
test["group_" + col + "_min_" + target] = test[col].map(min_map)
max_map = all_df.groupby(col)[target].max()
train["group_" + col + "_max_" + target] = train[col].map(max_map)
test["group_" + col + "_max_" + target] = test[col].map(max_map)
train["group_" + col + "_range_" + target] = \
train["group_" + col + "_max_" + target] - train["group_" + col + "_min_" + target]
test["group_" + col + "_range_" + target] = \
test["group_" + col + "_max_" + target] - test["group_" + col + "_min_" + target]
def calculate(df: pd.DataFrame):
df["eval_count"] = df.likes + df.dislikes
df["likes_ratio"] = df.likes / df.eval_count
df["likes_ratio"].fillna(-1)
df["dislikes_ratio"] = df.dislikes / df.eval_count
df["dislikes_ratio"].fillna(-1)
df["score"] = df["comment_count"] * df["eval_count"]
df["score_2"] = df["comment_count"] / df["eval_count"]
df["title_div_description"] = df["title_len"] / df["description_len"]
df["title_mul_description"] = df["title_len"] * df["description_len"]
def is_japanese(string):
count = 0
for ch in str(string):
try:
name = unicodedata.name(ch)
except:
continue
if "CJK UNIFIED" in name \
or "HIRAGANA" in name \
or "KATAKANA" in name:
count += 1
return count
def count_alphabet(string):
r = re.compile(r"[a-z|A-Z]+")
return len("".join(r.findall(str(string))))
def count_number(string):
r = re.compile(r"[0-9]+")
return len("".join(r.findall(str(string))))
def change_to_Date(train, test, input_column_name, output_column_name):
train[output_column_name] = train[input_column_name].map(lambda x: x.split('.'))
test[output_column_name] = test[input_column_name].map(lambda x: x.split('.'))
train[output_column_name] = train[output_column_name].map(
lambda x: '20' + x[0] + '-' + x[2] + '-' + x[1] + 'T00:00:00.000Z')
test[output_column_name] = test[output_column_name].map(
lambda x: '20' + x[0] + '-' + x[2] + '-' + x[1] + 'T00:00:00.000Z')
def tag_counter(train, test, n=500, pca_size=None, drop=False, create=True):
cols = [f"tags_{i}" for i in range(n)]
if create:
        # count the tags
tags = []
for tag in train["tags"]:
tags.extend(str(tag).split("|"))
tmp = Counter(tags)
tmp = sorted(tmp.items(), key=lambda x: x[1], reverse=True)[:n]
for i, item in enumerate(tmp):
train[f"tags_{i}"] = train["tags"].apply(lambda x: 1 if item[0] in str(x).split("|") else 0)
test[f"tags_{i}"] = test["tags"].apply(lambda x: 1 if item[0] in str(x).split("|") else 0)
train[cols].to_csv("./data/input/train_tags.csv", index=False)
test[cols].to_csv("./data/input/test_tags.csv", index=False)
else:
train_tags = pd.read_csv("./data/input/train_tags.csv")
test_tags = pd.read_csv("./data/input/test_tags.csv")
train = pd.concat([train, train_tags[cols]], axis=1)
test = pd.concat([test, test_tags[cols]], axis=1)
if pca_size:
# pca = TruncatedSVD(n_components=pca_size, random_state=2)
# pca.fit(train[cols])
# train_pca = pca.transform(train[cols])
# test_pca = pca.transform(test[cols])
train_pca, test_pca = get_umap(train[cols], test[cols], size=pca_size)
pca_cols = [f"tangs_pca_{i}" for i in range(pca_size)]
train = pd.concat([train, pd.DataFrame(train_pca, columns=pca_cols)], axis=1)
test = pd.concat([test, pd.DataFrame(test_pca, columns=pca_cols)], axis=1)
if drop:
train = train.drop(cols, axis=1)
test = test.drop(cols, axis=1)
return train, test
def title_counter(train, test, n=100, pca_size=None, drop=False, create=True):
train["title_words"] = train.title.apply(lambda x: sentence_splitter.splitter(str(x)))
test["title_words"] = test.title.apply(lambda x: sentence_splitter.splitter(str(x)))
cols = [f"title_word_{i}" for i in range(n)]
if create:
        # count the words in the title
word_list = []
for words in train["title_words"]:
word_list.extend(words)
tmp = Counter(word_list)
tmp = sorted(tmp.items(), key=lambda x: x[1], reverse=True)[:n]
for i, item in enumerate(tmp):
train[f"title_word_{i}"] = train["title_words"].apply(lambda x: x.count(item[0]))
test[f"title_word_{i}"] = test["title_words"].apply(lambda x: x.count(item[0]))
train[cols].to_csv("./data/input/train_title_words.csv", index=False)
test[cols].to_csv("./data/input/test_title_words.csv", index=False)
else:
train_tags = pd.read_csv("./data/input/train_title_words.csv")
test_tags = pd.read_csv("./data/input/test_title_words.csv")
train = pd.concat([train, train_tags[cols]], axis=1)
test = pd.concat([test, test_tags[cols]], axis=1)
if pca_size:
# pca = TruncatedSVD(n_components=pca_size, random_state=2)
# pca.fit(train[cols])
# train_pca = pca.transform(train[cols])
# test_pca = pca.transform(test[cols])
train_pca, test_pca = get_umap(train[cols], test[cols], size=pca_size)
pca_cols = [f"title_pca_{i}" for i in range(pca_size)]
train = pd.concat([train, pd.DataFrame(train_pca, columns=pca_cols)], axis=1)
test = pd.concat([test, pd.DataFrame(test_pca, columns=pca_cols)], axis=1)
if drop:
train = train.drop(cols, axis=1)
test = test.drop(cols, axis=1)
train = train.drop(["title_words"], axis=1)
test = test.drop(["title_words"], axis=1)
return train, test
def count_tag_in_title(tags, title):
tag_list = str(tags).split("|")
count = 0
for tag in tag_list:
if tag in str(title):
count += 1
return count
def category_unstack(train, test, all_df, group, category, normalize=True, pca_size=2):
use_columns = set(train[category].unique()) & set(test[category].unique())
unstack_df = all_df.groupby(group)[category].value_counts(normalize=normalize).unstack().fillna(0)
for col in use_columns:
train[f"{category}_{col}_ratio_in_{group}_group"] = train[group].map(unstack_df[col])
test[f"{category}_{col}_ratio_in_{group}_group"] = test[group].map(unstack_df[col])
cols = [f"{category}_{col}_ratio_in_{group}_group" for col in use_columns]
pca_cols = [f"{category}_pca_{i}_in_{group}_group" for i in range(pca_size)]
pca = TruncatedSVD(n_components=pca_size, random_state=2)
pca.fit(train[cols])
train_pca = pca.transform(train[cols])
test_pca = pca.transform(test[cols])
train = pd.concat([train, pd.DataFrame(train_pca, columns=pca_cols)], axis=1)
test = pd.concat([test, pd.DataFrame(test_pca, columns=pca_cols)], axis=1)
return train, test
def make_dataset(complement=True):
train = pd.read_csv("./data/input/train_data.csv")
test = pd.read_csv("./data/input/test_data.csv")
if complement:
complement_likes = pd.read_csv("./data/input/complement_likes.csv")
complement_dislikes = pd.read_csv("./data/input/complement_dislikes.csv")
complement_comment = pd.read_csv("./data/input/complement_comment.csv")
likes_dict = dict(zip(complement_likes.video_id, complement_likes.y))
dislikes_dict = dict(zip(complement_dislikes.video_id, complement_dislikes.y))
comment_dict = dict(zip(complement_comment.video_id, complement_comment.y))
train["likes"] = train.apply(
lambda x: likes_dict[x["video_id"]] if x["video_id"] in likes_dict.keys() else x["likes"], axis=1)
train["dislikes"] = train.apply(
lambda x: dislikes_dict[x["video_id"]] if x["video_id"] in dislikes_dict.keys() else x["dislikes"], axis=1)
train["comment_count"] = train.apply(
lambda x: comment_dict[x["video_id"]] if x["video_id"] in comment_dict.keys() else x["comment_count"],
axis=1)
test["likes"] = test.apply(
lambda x: likes_dict[x["video_id"]] if x["video_id"] in likes_dict.keys() else x["likes"], axis=1)
test["dislikes"] = test.apply(
lambda x: dislikes_dict[x["video_id"]] if x["video_id"] in dislikes_dict.keys() else x["dislikes"], axis=1)
test["comment_count"] = test.apply(
lambda x: comment_dict[x["video_id"]] if x["video_id"] in comment_dict.keys() else x["comment_count"],
axis=1)
    # average thumbnail color
# train_thumbnail = pd.read_csv("./data/input/train_thumbnail.csv")
# test_thumbnail = pd.read_csv("./data/input/test_thumbnail.csv")
# train = train.merge(train_thumbnail, on="video_id")
# test = test.merge(test_thumbnail, on="video_id")
    # thumbnail features
# train_image_features = pd.read_csv("./data/input/train_image_features.csv")
# test_image_features = pd.read_csv("./data/input/test_image_features.csv")
# train_umap, test_umap = get_umap(train_image_features, test_image_features, size=2)
# pca_cols = [f"image_features_umap_{i}" for i in range(2)]
# train = pd.concat([train, pd.DataFrame(train_umap, columns=pca_cols)], axis=1)
# test = pd.concat([test, pd.DataFrame(test_umap, columns=pca_cols)], axis=1)
train.likes = train.likes.apply(np.log1p)
test.likes = test.likes.apply(np.log1p)
train.dislikes = train.dislikes.apply(np.log1p)
test.dislikes = test.dislikes.apply(np.log1p)
train.comment_count = train.comment_count.apply(np.log1p)
test.comment_count = test.comment_count.apply(np.log1p)
train["title_len"] = train.title.apply(lambda x: len(str(x)))
test["title_len"] = test.title.apply(lambda x: len(str(x)))
train["channelTitle_len"] = train.channelTitle.apply(lambda x: len(str(x)))
test["channelTitle_len"] = test.channelTitle.apply(lambda x: len(str(x)))
train["description_len"] = train.description.apply(lambda x: len(str(x)))
test["description_len"] = test.description.apply(lambda x: len(str(x)))
train["tags_count"] = train.tags.apply(lambda x: str(x).count("|"))
test["tags_count"] = test.tags.apply(lambda x: str(x).count("|"))
    # time-related features
train["year"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.year)
test["year"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.year)
train["month"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.month)
test["month"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.month)
train["hour"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.hour)
test["hour"] = pd.to_datetime(test.publishedAt).apply(lambda x: x.hour)
change_to_Date(train, test, "collection_date", "collectionAt")
train["period"] = (pd.to_datetime(train.collectionAt) - pd.to_datetime(train.publishedAt)).apply(lambda x: x.days)
test["period"] = (pd.to_datetime(test.collectionAt) - pd.to_datetime(test.publishedAt)).apply(lambda x: x.days)
train["publishedAt"] = pd.to_datetime(train.publishedAt).apply(lambda x: x.value)
test["publishedAt"] = | pd.to_datetime(test.publishedAt) | pandas.to_datetime |
#!/usr/bin/python3
import json, dateutil
import pandas as pd
import coin_wizard.broker_platform_objects as BrokerPlatform
from datetime import datetime
from time import sleep
from oandapyV20 import API
import oandapyV20.endpoints.accounts as accounts
import oandapyV20.endpoints.orders as orders
import oandapyV20.endpoints.trades as trades
import oandapyV20.endpoints.instruments as instruments
import oandapyV20.endpoints.pricing as pricing
import oandapyV20.endpoints.transactions as transactions
from .account_instruments_candles_patch import AccountsInstrumentsCandles
from oandapyV20.contrib.requests import MarketOrderRequest
update_interval_threshold_ms = 50
class BrokerEventLoopAPI(BrokerPlatform.BrokerEventLoopAPI):
hedging = False
broker_settings_fields = ['access_token', 'account_id']
def __init__(self, before_loop, after_loop, broker_settings, nsp, loop_interval_ms = 1000):
super().__init__(before_loop, after_loop, broker_settings, nsp, loop_interval_ms)
self.oanda_api = API(access_token=broker_settings['access_token'])
self.account_id = broker_settings['account_id']
self.instruments_watchlist = {}
self.latest_sync_transaction_id = 0
self.account = BrokerPlatform.Account(self._update_account_handler)
self.account.latest_update_datetime = datetime.now()
# Initializing
# Account
r = accounts.AccountDetails(self.account_id)
rv = self.oanda_api.request(r)
# print(json.dumps(rv, indent=2))
account = rv['account']
self.latest_sync_transaction_id = int(account['lastTransactionID'])
self.account.balance = float(account['balance'])
self.account.currency = account['currency']
self.account.margin_rate = float(account['marginRate'])
self.account.margin_used = float(account['marginUsed'])
self.account.margin_available = float(account['marginAvailable'])
self.account.unrealized_pl = float(account['unrealizedPL'])
for order_detail in account['orders']:
try:
# print(json.dumps(order_detail, indent=2))
self._import_order_detail(order_detail)
# print(order_detail)
except Exception as e:
# print(json.dumps(order_detail, indent=2))
# print(e)
pass
for trade_detail in account['trades']:
# print(json.dumps(trade_detail, indent=2))
self._import_trade_detail(trade_detail)
def order(self, instrument_name, order_settings, trade_settings):
order_settings_r = {
"instrument": instrument_name,
"type": order_settings['type'].upper()
}
if "bound" in order_settings:
order_settings_r['priceBound'] = order_settings['bound']
if "price" in order_settings:
order_settings_r['price'] = order_settings['price']
# trade_settings
# if "units" in trade_settings:
order_settings_r['units'] = trade_settings['units']
if "take_profit" in trade_settings:
order_settings_r['takeProfitOnFill'] = {
"price": trade_settings['take_profit']
}
if "stop_lost" in trade_settings:
order_settings_r['stopLossOnFill'] = {
"price": trade_settings['stop_lost']
}
if "trailing_stop_distance" in trade_settings:
order_settings_r['trailingStopLossOnFill'] = {
"distance": trade_settings['trailing_stop_distance']
}
r = orders.OrderCreate(self.account_id, {'order': order_settings_r})
rv = self.oanda_api.request(r)
# print(json.dumps(rv, indent=2))
rv['orderCreateTransaction']['type'] = order_settings['type'].upper()
return self._import_order_detail(rv['orderCreateTransaction'])
def getInstrument(self, instrument_name):
if instrument_name in self.instruments_watchlist:
return self.instruments_watchlist[instrument_name]
instrument = BrokerPlatform.Instrument(instrument_name, self._update_instrument_handler)
latest_candles_iso_time = None
for granularity in instrument.recent_candles:
params = {
"granularity": granularity,
"count": 5000,
# "count": 3,
"price": "M",
"alignmentTimezone": "UTC",
"dailyAlignment": 0
}
r = AccountsInstrumentsCandles(self.account_id, instrument.instrument_name, params)
# r = instruments.InstrumentsCandles(instrument_name, params)
rv = self.oanda_api.request(r)
# print(json.dumps(rv, indent=2))
candles = rv['candles']
# print(json.dumps(candles, indent=2))
candles_df = self._convert_mid_candles_to_dataframe(candles)
# print(candles_df)
instrument.recent_candles[granularity] = candles_df.loc[candles_df['completed'] == True]
if granularity == 'M1':
latest_candles_iso_time = candles[-1]['time']
instrument.latest_candles_iso_time = latest_candles_iso_time
instrument.latest_update_datetime = datetime.now()
self.instruments_watchlist[instrument_name] = instrument
# raise
return instrument
def _import_order_detail(self, order_detail):
if order_detail['type'] in ['MARKET', 'LIMIT', 'STOP']:
order_settings = {
"type": order_detail['type'].lower()
}
if "price" in order_detail:
order_settings['price'] = float(order_detail['price'])
if "priceBound" in order_detail:
order_settings['bound'] = float(order_detail['priceBound'])
trade_settings = {}
if "units" in order_detail:
trade_settings['units'] = float(order_detail['units'])
if "takeProfitOnFill" in order_detail:
trade_settings['take_profit'] = float(order_detail['takeProfitOnFill']['price'])
if "stopLossOnFill" in order_detail:
trade_settings['stop_lost'] = float(order_detail['stopLossOnFill']['price'])
if "trailingStopLossOnFill" in order_detail:
trade_settings['trailing_stop_distance'] = float(order_detail['trailingStopLossOnFill']['distance'])
# print(json.dumps(order_detail, indent=2))
order = BrokerPlatform.Order(order_detail['id'], order_detail['instrument'], order_settings, trade_settings)
order.cancel_handler = self._order_cancel_handler
# order
self.account.orders.append(order)
# print(self.account.orders)
return order
else:
# print(order_detail)
raise Exception('Cannot import order detail.')
def _import_trade_detail(self, trade_detail):
# print(json.dumps(trade_detail, indent=2))
trade_settings = {}
trade_settings['units'] = float(trade_detail['initialUnits'])
trade_settings['current_units'] = float(trade_detail['currentUnits'])
if "takeProfitOrder" in trade_detail:
trade_settings['take_profit'] = float(trade_detail['takeProfitOrder']['price'])
elif "takeProfitOrderID" in trade_detail:
r = orders.OrderDetails(self.account_id, trade_detail['takeProfitOrderID'])
rv = self.oanda_api.request(r)
trade_settings['take_profit'] = float(rv['order']['price'])
if "stopLossOrder" in trade_detail:
trade_settings['stop_loss'] = float(trade_detail['stopLossOrder']['price'])
elif "stopLossOrderID" in trade_detail:
r = orders.OrderDetails(self.account_id, trade_detail['stopLossOrderID'])
rv = self.oanda_api.request(r)
trade_settings['stop_loss'] = float(rv['order']['price'])
if "trailingStopLossOrder" in trade_detail:
trade_settings['trailing_stop_distance'] = float(trade_detail['trailingStopLossOrder']['distance'])
elif "trailingStopLossOrderID" in trade_detail:
r = orders.OrderDetails(self.account_id, trade_detail['trailingStopLossOrderID'])
rv = self.oanda_api.request(r)
# print(json.dumps(rv, indent=2))
trade_settings['trailing_stop_distance'] = float(rv['order']['distance'])
# print(json.dumps(trade_detail, indent=2))
trade = BrokerPlatform.Trade(trade_detail['id'], trade_detail['instrument'], trade_detail['price'], trade_settings, self._update_trade_handler)
if "takeProfitOrderID" in trade_detail:
trade.take_profit_order_id = trade_detail['takeProfitOrderID']
if "stopLossOrderID" in trade_detail:
trade.stop_lost_order_id = trade_detail['stopLossOrderID']
if "trailingStopLossOrderID" in trade_detail:
trade.trailing_stop_order_id = trade_detail['trailingStopLossOrderID']
trade.close_handler = self._trade_close_handler
trade.modify_handler = None
trade.closed = trade_detail['state'] == 'CLOSED'
trade.open_price = float(trade_detail['price'])
trade.price = float(trade_detail['price'])
if 'unrealizedPL' in trade_detail:
trade.unrealized_pl = float(trade_detail['unrealizedPL'])
else:
trade.unrealized_pl = 0.0
self.account.trades.append(trade)
trade.latest_update_datetime = datetime.now()
# print(self.account.trades)
return trade
def _convert_mid_candles_to_dataframe(self, candles):
rows = []
for candle in candles:
row = {
"timestamp": candle['time'],
"open": float(candle['mid']['o']),
"high": float(candle['mid']['h']),
"low": float(candle['mid']['l']),
"close": float(candle['mid']['c']),
"volume": float(candle['volume']),
"completed": candle['complete'],
}
rows.append(row)
df = pd.DataFrame(data=rows)
df['timestamp'] = pd.DatetimeIndex( | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
import pandas as pd
import numpy as np
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
from unidecode import unidecode
from Player import Player
class SeasonStats:
"""
The class scrapes and stores NBA Player Stats for a certain NBA season.
"""
def __init__(self, season='2020-2021', update=False):
"""
The method initializes the object type SeasonStats
Attributes
----------
season: str
The NBA season of player stats, in form "YYYY-YYYY"
update: bool
Bool to represent whether to scrape data for new games that have happened
url: str
The URL to scrape
link: BeautifulSoup
The object to scrape the URL
players: None
Pandas DataFrame to be filled with player name, position and URL
for all players who played in an NBA season
gamelogs: Pandas DataFrame
Pandas Dataframe with all gamelogs of all players in an NBA season
agg_data: Pandas DataFrame
Pandas Dataframe with aggregated data of per game stats
updated_gamelogs: Pandas DataFrame
Pandas DataFrame with all gamelogs of all players after a certain date.
Only used if update attribute is True
"""
if self._validate_season(season):
self.season = season
self.update = update
if self.update == True:
# read in data, find latest date
# in order to scrape games that happened
# after this date
self.gamelogs = pd.read_csv("../data/nba_gamelogs.csv", parse_dates=["Date"])
self.last_date = self.gamelogs.Date.max()
self.updated_gamelogs = pd.DataFrame()
else:
self.gamelogs = None
self.url = self._generate_url()
self.link = BeautifulSoup(requests.get(self.url).text, "lxml")
self.players = None
self.agg_data = pd.DataFrame()
def _validate_season(self, season):
"""
Method to validate the input season
Parameters
----------
season: str
The NBA season of player stats, in form "YYYY-YYYY"
Returns
----------
bool
bool representing if season is valid
"""
# test type and length of input
if not isinstance(season, str):
raise TypeError("Input season must be a string")
if len(season)!=9:
raise ValueError("Length of input season must be 9")
lower = season[:4]
divider = season[4]
upper = season[5:]
# ensure input entered in valid manner ("YYYY-YYYY")
if not lower.isdigit():
raise ValueError("Lower range of season must be a digit")
if not upper.isdigit():
raise ValueError("Upper range of season must be a digit")
if divider != "-":
raise ValueError("Divider between season must be -")
lower = int(lower)
upper = int(upper)
# test that years inputted are entered in order,
# and are in a certain year range
if lower >= upper:
raise ValueError("Lower range of season must be less than upper range")
if lower not in range(1989,2021):
raise ValueError("Lower range of season must be between 1989 to 2020")
if lower + 1 != upper:
raise ValueError("Lower range of season must be 1 year less than upper")
return True
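    # Examples (added): "2019-2020" passes validation, while "2019-2021",
    # "2019/2020" or a non-string input raise ValueError/TypeError before
    # any scraping happens.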
def _generate_url(self):
"""
Method to generate url to scrape players
Returns
----------
str
str representing the website to scrape
"""
year = self.season[5:]
return "https://www.basketball-reference.com/leagues/NBA_" + year + "_per_game.html"
def _scrape_players(self):
"""
Method scrapes player names and player URLs for all players in an NBA season and
stores in the players attribute.
"""
table = self.link.find_all("table")[0]
data = []
for row in table.find_all("tr"):
# for each row, scrape URL, name and position of player
row_data = {"URL":"", "Name":"", "Position":""}
for cell in row.find_all("td"):
if cell["data-stat"] == "player":
# use player id for URL of player's stats
player_id = cell["data-append-csv"]
gl_url = "https://www.basketball-reference.com/players/a/" + player_id + "/gamelog/" + self.season[5:]
row_data["URL"] = gl_url
# remove accents from names
row_data["Name"] = unidecode(cell.text)
elif cell["data-stat"] == "pos":
pos = cell.text
# take first position, if player has multiple positions
if "-" in pos:
p = pos.split("-")[0]
row_data["Position"] = p
else:
row_data["Position"] = pos
data.append(row_data)
self.players = pd.DataFrame(data)
# drop NAs and duplicates
self.players = self.players.query("URL!=''")
self.players = self.players.drop_duplicates(subset=["URL"]).reset_index()
self.players = self.players[["Name", "Position", "URL"]]
def _scrape_player_data(self):
"""
Method iterates through all players who played in an NBA season,
creates an instance of the Player class, scrapes the player's gamelogs
for the season, and appends to the data attribute.
"""
# if update is true (updating data for new games)
if isinstance(self.gamelogs, pd.DataFrame):
for i in range(self.players.shape[0]):
# get each row's URL, name and position
# get the player's game logs
# create instance of Player class
# and retrieve game logs and aggregated data
url = self.players.loc[i,"URL"]
name = self.players.loc[i,"Name"]
pos = self.players.loc[i,"Position"]
player_data = self.gamelogs.query("Player==@name")
player = Player(url, name, self.season, pos, player_data, self.last_date)
self.updated_gamelogs = self.updated_gamelogs.append(player.get_gamelogs())
self.agg_data = self.agg_data.append(player.get_aggregated_data())
# if update is false (scraping all games)
else:
self.gamelogs = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import ee
import pandas as pd
import datetime
import geopandas
# Filter collection by point and date
def collection_filtering(point, collection_name, year_range, doy_range):
collection = ee.ImageCollection(collection_name)\
.filterBounds(point)\
.filter(ee.Filter.calendarRange(year_range[0], year_range[1], 'year'))\
.filter(ee.Filter.dayOfYear(doy_range[0],doy_range[1]))
return collection
# Cloud masking for C1, L4-L7. Operators capitalized to
# avoid confusing with internal Python operators
def cloud_mask_l4_7_C1(img):
pqa = ee.Image(img).select(['pixel_qa'])
mask = (pqa.eq(66)).Or(pqa.eq(130))\
.Or(pqa.eq(68)).Or(pqa.eq(132))
return ee.Image(img).updateMask(mask)
# Cloud masking for C1, L8
def cloud_mask_l8_C1(img):
pqa = ee.Image(img).select(['pixel_qa'])
mask = (pqa.eq(322)).Or(pqa.eq(386)).Or(pqa.eq(324))\
.Or(pqa.eq(388)).Or(pqa.eq(836)).Or(pqa.eq(900))
return ee.Image(img).updateMask(mask)
def stack_renamer_l4_7_C1(img):
band_list = ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'B6', 'pixel_qa']
name_list = ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'THERMAL',
'pixel_qa']
return ee.Image(img).select(band_list).rename(name_list)
def stack_renamer_l8_C1(img):
band_list = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'pixel_qa']
name_list = ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'THERMAL',
'pixel_qa']
return ee.Image(img).select(band_list).rename(name_list)
# filter and merge collections
def get_full_collection(coords, year_range, doy_range):
point = ee.Geometry.Point(coords)
l8_renamed = collection_filtering(point, 'LANDSAT/LC08/C01/T1_SR', year_range, doy_range)\
.map(stack_renamer_l8_C1)
l8_filtered1 = l8_renamed.map(cloud_mask_l8_C1)
l7_renamed = collection_filtering(point, 'LANDSAT/LE07/C01/T1_SR', year_range, doy_range)\
.map(stack_renamer_l4_7_C1);
l7_filtered1 = l7_renamed.map(cloud_mask_l4_7_C1)
l5_renamed = collection_filtering(point, 'LANDSAT/LT05/C01/T1_SR', year_range, doy_range)\
.map(stack_renamer_l4_7_C1)
l5_filtered1 = l5_renamed.map(cloud_mask_l4_7_C1)
all_scenes = ee.ImageCollection((l8_filtered1.merge(l7_filtered1))\
.merge(l5_filtered1)).sort('system:time_start').map(doIndices)
return all_scenes
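# Usage sketch (added; coordinates are arbitrary placeholders and assume
# ee.Initialize() has already been called):
#   coords = [-72.5, 42.4]   # lon, lat
#   col = get_full_collection(coords, (2015, 2020), (1, 365))
#   ts = get_df_full(col, coords)   # per-date band values at the point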
# Utility function for calculating spectral indices
def doIndices(fullImage):
image = fullImage.select(['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2'])
# Parameters
cfThreshold = 20000
soil = [2000, 3000, 3400, 5800, 6000, 5800]
gv = [500, 900, 400, 6100, 3000, 1000]
npv = [1400, 1700, 2200, 3000, 5500, 3000]
shade = [0, 0, 0, 0, 0, 0]
cloud = [9000, 9600, 8000, 7800, 7200, 6500]
cfThreshold = ee.Image.constant(cfThreshold)
    # Do spectral unmixing on a single image
unmixImage = ee.Image(image).unmix([gv, shade, npv, soil, cloud], True,True).multiply(ee.Image(10000))\
.rename(['band_0', 'band_1', 'band_2','band_3','band_4'])
newImage = ee.Image(fullImage).addBands(unmixImage)
ndfi = ee.Image(unmixImage).expression(
'((GV / (10000 - SHADE)) - (NPV + SOIL)) / ((GV / (10000 - SHADE)) + NPV + SOIL)', {
'GV': ee.Image(unmixImage).select('band_0'),
'SHADE': ee.Image(unmixImage).select('band_1'),
'NPV': ee.Image(unmixImage).select('band_2'),
'SOIL': ee.Image(unmixImage).select('band_3')
})
ndvi = ee.Image(image).normalizedDifference(['NIR','RED']).rename('NDVI')
evi = ee.Image(image).expression(
'float(2.5*(((B4/10000) - (B3/10000)) / ((B4/10000) + (6 * (B3/10000)) - (7.5 * (B1/10000)) + 1)))',
{
'B4': ee.Image(image).select(['NIR']),
'B3': ee.Image(image).select(['RED']),
'B1': ee.Image(image).select(['BLUE'])
}).rename('EVI')
brightness = ee.Image(image).expression(
'(L1 * BLUE) + (L2 * GREEN) + (L3 * RED) + (L4 * NIR) + (L5 * SWIR1) + (L6 * B6)',
{
'L1': ee.Image(image).select('BLUE'), 'BLUE': 0.2043,
'L2': ee.Image(image).select('GREEN'),'GREEN': 0.4158,
'L3': ee.Image(image).select('RED'), 'RED': 0.5524,
'L4': ee.Image(image).select('NIR'), 'NIR': 0.5741,
'L5': ee.Image(image).select('SWIR1'), 'SWIR1': 0.3124,
'L6': ee.Image(image).select('SWIR2'), 'B6': 0.2303
}).rename('BRIGHTNESS')
greenness = ee.Image(image).expression(
'(L1 * BLUE) + (L2 * GREEN) + (L3 * RED) + (L4 * NIR) + (L5 * SWIR1) + (L6 * B6)',
{
'L1': image.select('BLUE'), 'BLUE': -0.1603,
'L2': image.select('GREEN'), 'GREEN': -0.2819,
'L3': image.select('RED'), 'RED': -0.4934,
'L4': image.select('NIR'), 'NIR': 0.7940,
'L5': image.select('SWIR1'), 'SWIR1': -0.0002,
'L6': image.select('SWIR2'), 'B6': -0.1446
}).rename('GREENNESS')
wetness = ee.Image(image).expression(
'(L1 * BLUE) + (L2 * GREEN) + (L3 * RED) + (L4 * NIR) + (L5 * SWIR1) + (L6 * B6)',
{
'L1': image.select('BLUE'), 'BLUE': 0.0315,
'L2': image.select('GREEN'), 'GREEN': 0.2021,
'L3': image.select('RED'), 'RED': 0.3102,
'L4': image.select('NIR'), 'NIR': 0.1594,
'L5': image.select('SWIR1'), 'SWIR1': -0.6806,
'L6': image.select('SWIR2'), 'B6': -0.6109
}).rename('WETNESS')
return ee.Image(newImage)\
.addBands([ndfi.rename(['NDFI']).multiply(10000), ndvi.multiply(10000),\
evi.multiply(10000), brightness, greenness, wetness])\
.select(['band_0','band_1','band_2','band_3','NDFI','NDVI','EVI','BLUE',\
'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2','THERMAL', 'BRIGHTNESS',\
'GREENNESS', 'WETNESS', 'pixel_qa'])\
.rename(['GV','Shade','NPV','Soil','NDFI','NDVI','EVI','BLUE', 'GREEN', \
'RED', 'NIR', 'SWIR1', 'SWIR2','THERMAL', 'BRIGHTNESS', \
'GREENNESS', 'WETNESS', 'pixel_qa'])
# Get time series for location as a pandas dataframe
def get_df_full(collection, coords):
point = ee.Geometry.Point(coords)
# Sample for a time series of values at the point.
filtered_col = collection.filterBounds(point)
geom_values = filtered_col.getRegion(geometry=point, scale=30)
geom_values_list = ee.List(geom_values).getInfo()
# Convert to a Pandas DataFrame.
header = geom_values_list[0]
data = pd.DataFrame(geom_values_list[1:], columns=header)
data['datetime'] = pd.to_datetime(data['time'], unit='ms', utc=True)
data['doy'] = | pd.DatetimeIndex(data['datetime']) | pandas.DatetimeIndex |
from surfboard.sound import Waveform
# import numpy as np
import pandas as pd
import altair as alt
path = "../resources/no-god.wav"
# Instantiate from a .wav file.
sound = Waveform(path=path, sample_rate=44100)
# OR: instantiate from a numpy array.
# sound = Waveform(signal=np.sin(np.arange(0, 2 * np.pi, 1/24000)), sample_rate=44100)
f0_contour = sound.f0_contour()
# print(f0_contour)
df = | pd.DataFrame(f0_contour[0], columns=["pitch"]) | pandas.DataFrame |
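# A minimal plotting sketch (added assumption: chart the pitch contour with the
# altair import above; not part of the original snippet):
#   chart = alt.Chart(df.reset_index()).mark_line().encode(x="index", y="pitch")
#   chart.save("pitch_contour.html")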
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file contains layer linting functions.
"""
from collections import OrderedDict
import pandas as pd
from .activations import create_activations
from .engine_plan import EnginePlan
class ConvLinter():
"""Convolution layer linter."""
def __init__(self, plan: EnginePlan):
self.plan = plan
self.convs = plan.get_layers_by_type('Convolution')
def tc_lint(self):
"""Search for Convolutions which are not accelerated by TensorCode"""
def is_small_conv(conv):
inputs, _ = create_activations(conv)
n, c, h, w = inputs[0].shape
return c < 32
report = OrderedDict()
# Look for kernels that are not scheduled for xmma (TensorCore
# acceleration)
tc_candidates = self.convs.query(f"precision != \"FP32\"").copy()
# Identify acceleration from tactic name
df = tc_candidates
df = df[df['tactic'].str.contains("imma|hmma|xmma|i88|884", na=False) == False]
for index, conv in df.iterrows():
mitigation = ""
if is_small_conv(conv):
mitigation = "This Convolution has a small number " \
"of input channels so acceleration may not be possible."
report[conv.Name] = OrderedDict({
'name': conv.Name,
'tactic': conv.tactic,
'subtype': conv.subtype,
'hazard': "Convolution is not accelerated.",
'mitigation': mitigation,
'help': "TensorCores accelerate large Convolution and GEMM operations."
})
return report
def mixed_precision_lint(self):
"""Search for Convolutions with Int8 inputs and Float outputs"""
report = OrderedDict()
df = self.convs
df = df.loc[df['precision'] == 'INT8'].copy()
for index, conv in df.iterrows():
inputs, outputs = create_activations(conv)
inf = inputs[0].format[:4]
outf = outputs[0].format[:4]
found = inf == 'Int8' and outf != 'Int8'
if found:
report[conv.Name] = OrderedDict({
'name': conv.Name,
'tactic': conv.tactic,
'subtype': conv.subtype,
'hazard': "Quantized Convolution has float outputs.",
'mitigation': "Consider adding quantization after the convolution.",
'help': "Quantized Convolution with float outputs is ill advised "
"for memory-limited convolutions."
})
return report
def lint(self):
report = self.tc_lint()
report.update(self.mixed_precision_lint())
df = | pd.DataFrame.from_dict(report, orient='index') | pandas.DataFrame.from_dict |
import os
from os.path import join
import pandas as pd
import numpy as np
import torch
from Hessian.GAN_hessian_compute import hessian_compute
# from hessian_analysis_tools import scan_hess_npz, plot_spectra, average_H, compute_hess_corr, plot_consistency_example
# from hessian_axis_visualize import vis_eigen_explore, vis_eigen_action, vis_eigen_action_row, vis_eigen_explore_row
from GAN_utils import loadStyleGAN2, StyleGAN2_wrapper, loadBigGAN, BigGAN_wrapper, loadPGGAN, PGGAN_wrapper
import matplotlib.pylab as plt
import matplotlib
#%%
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import lpips
ImDist = lpips.LPIPS(net="squeeze").cuda()
# SSIM
import pytorch_msssim
D = pytorch_msssim.SSIM() # note: for SSIM, a higher score means more similar images, so to conform to the distance convention we use 1 - SSIM as a proxy for distance.
# L2 / MSE
def MSE(im1, im2):
return (im1 - im2).pow(2).mean(dim=[1,2,3])
# L1 / MAE
# def L1(im1, im2):
# return (im1 - im2).abs().mean(dim=[1,2,3])
# Note: L1 is less appropriate for this task, as it is farther from a squared-distance function.
#%% Utility functions to quantify relationship between 2 eigvals or 2 Hessians.
def spectra_cmp(eigvals1, eigvals2, show=True):
cc = np.corrcoef((eigvals1), (eigvals2))[0, 1]
logcc = np.corrcoef(np.log10(np.abs(eigvals1)+1E-8), np.log10(np.abs(eigvals2)+1E-8))[0, 1]
reg_coef = np.polyfit((eigvals1), (eigvals2), 1)
logreg_coef = np.polyfit(np.log10(np.abs(eigvals1)+1E-8), np.log10(np.abs(eigvals2)+1E-8), 1)
if show:
print("Correlation %.3f (lin) %.3f (log). Regress Coef [%.2f, %.2f] (lin) [%.2f, %.2f] (log)"%
(cc, logcc, *tuple(reg_coef), *tuple(logreg_coef)))
return cc, logcc, reg_coef, logreg_coef
def Hessian_cmp(eigvals1, eigvecs1, H1, eigvals2, eigvecs2, H2, show=True):
H_cc = np.corrcoef(H1.flatten(), H2.flatten())[0,1]
logH1 = eigvecs1 * np.log10(np.abs(eigvals1))[np.newaxis, :] @ eigvecs1.T
logH2 = eigvecs2 * np.log10(np.abs(eigvals2))[np.newaxis, :] @ eigvecs2.T
logH_cc = np.corrcoef(logH1.flatten(), logH2.flatten())[0, 1]
if show:
print("Entrywise Correlation Hessian %.3f log Hessian %.3f (log)"% (H_cc, logH_cc,))
return H_cc, logH_cc
def top_eigvec_corr(eigvects1, eigvects2, eignum=10):
cc_arr = []
for eigi in range(eignum):
cc = np.corrcoef(eigvects1[:, -eigi-1], eigvects2[:, -eigi-1])[0, 1]
cc_arr.append(cc)
return np.abs(cc_arr)
def eigvec_H_corr(eigvals1, eigvects1, H1, eigvals2, eigvects2, H2, show=True):
vHv12 = np.diag(eigvects1.T @ H2 @ eigvects1)
vHv21 = np.diag(eigvects2.T @ H1 @ eigvects2)
cc_12 = np.corrcoef(vHv12, eigvals2)[0, 1]
cclog_12 = np.corrcoef(np.log(np.abs(vHv12)+1E-8), np.log(np.abs(eigvals2+1E-8)))[0, 1]
cc_21 = np.corrcoef(vHv21, eigvals1)[0, 1]
cclog_21 = np.corrcoef(np.log(np.abs(vHv21)+1E-8), np.log(np.abs(eigvals1+1E-8)))[0, 1]
if show:
print("Applying eigvec 1->2: corr %.3f (lin) %.3f (log)"%(cc_12, cclog_12))
print("Applying eigvec 2->1: corr %.3f (lin) %.3f (log)"%(cc_21, cclog_21))
return cc_12, cclog_12, cc_21, cclog_21
#%%
saveroot = r"E:\Cluster_Backup"
#%%
BGAN = loadBigGAN()
G = BigGAN_wrapper(BGAN)
savedir = join(saveroot, "ImDist_cmp\\BigGAN")
os.makedirs(savedir, exist_ok=True)
SSIM_stat_col = []
MSE_stat_col = []
for idx in range(100):
refvec = G.sample_vector(1, device="cuda")# 0.1 * torch.randn(1, 256)
eigvals_PS, eigvects_PS, H_PS = hessian_compute(G, refvec, ImDist, hessian_method="BP")
eigvals_SSIM, eigvects_SSIM, H_SSIM = hessian_compute(G, refvec, D, hessian_method="BP")
eigvals_MSE, eigvects_MSE, H_MSE = hessian_compute(G, refvec, MSE, hessian_method="BP")
#% eigvals_L1, eigvects_L1, H_L1 = hessian_compute(G, refvec, L1, hessian_method="BP")
print("SSIM - LPIPS comparison")
cc_SSIM, logcc_SSIM, reg_coef_SSIM, logreg_coef_SSIM = spectra_cmp(-eigvals_SSIM[::-1], eigvals_PS, show=True)
H_cc_SSIM, logH_cc_SSIM = Hessian_cmp(-eigvals_SSIM[::-1], eigvects_SSIM, -H_SSIM, eigvals_PS, eigvects_PS, H_PS, show=True)
SSIM_stat_col.append((idx, cc_SSIM, logcc_SSIM, *tuple(reg_coef_SSIM), *tuple(logreg_coef_SSIM), H_cc_SSIM, logH_cc_SSIM))
print("MSE - LPIPS comparison")
cc_MSE, logcc_MSE, reg_coef_MSE, logreg_coef_MSE = spectra_cmp(eigvals_MSE, eigvals_PS, show=True)
H_cc_MSE, logH_cc_MSE = Hessian_cmp(eigvals_MSE, eigvects_MSE, H_MSE, eigvals_PS, eigvects_PS, H_PS, show=True)
MSE_stat_col.append((idx, cc_MSE, logcc_MSE, *tuple(reg_coef_MSE), *tuple(logreg_coef_MSE), H_cc_MSE, logH_cc_MSE))
np.savez(join(savedir,"Hess_cmp_%03d.npz"%idx), **{"eva_PS":eigvals_PS, "evc_PS":eigvects_PS, "H_PS":H_PS,
"eva_SSIM":eigvals_SSIM, "evc_SSIM":eigvects_SSIM, "H_SSIM":H_SSIM,
"eva_MSE":eigvals_MSE, "evc_MSE":eigvects_MSE, "H_MSE":H_MSE,})
np.savez(join(savedir, "H_cmp_stat.npz"), MSE_stat=MSE_stat_col, SSIM_stat=SSIM_stat_col)
MSE_stat_tab = pd.DataFrame(MSE_stat_col, columns=["id", "cc", "logcc", "reg_slop", "reg_intcp", "reg_log_slop", "reg_log_intcp", "H_cc", "logH_cc"])
MSE_stat_tab.to_csv(join(savedir, "H_cmp_MSE_stat.csv"))
SSIM_stat_tab = pd.DataFrame(SSIM_stat_col, columns=["id", "cc", "logcc", "reg_slop", "reg_intcp", "reg_log_slop", "reg_log_intcp", "H_cc", "logH_cc"])
SSIM_stat_tab.to_csv(join(savedir, "H_cmp_SSIM_stat.csv"))
del G, BGAN
torch.cuda.empty_cache()
#%%
PGGAN = loadPGGAN()
G = PGGAN_wrapper(PGGAN)
savedir = join(saveroot, "ImDist_cmp\\PGGAN")
os.makedirs(savedir, exist_ok=True)
SSIM_stat_col = []
MSE_stat_col = []
for idx in range(100):
refvec = G.sample_vector(1, device="cuda")# 0.1 * torch.randn(1, 256)
eigvals_PS, eigvects_PS, H_PS = hessian_compute(G, refvec, ImDist, hessian_method="BP")
eigvals_SSIM, eigvects_SSIM, H_SSIM = hessian_compute(G, refvec, D, hessian_method="BP")
eigvals_MSE, eigvects_MSE, H_MSE = hessian_compute(G, refvec, MSE, hessian_method="BP")
#% eigvals_L1, eigvects_L1, H_L1 = hessian_compute(G, refvec, L1, hessian_method="BP")
print("SSIM - LPIPS comparison")
cc_SSIM, logcc_SSIM, reg_coef_SSIM, logreg_coef_SSIM = spectra_cmp(-eigvals_SSIM[::-1], eigvals_PS, show=True)
H_cc_SSIM, logH_cc_SSIM = Hessian_cmp(-eigvals_SSIM[::-1], eigvects_SSIM, -H_SSIM, eigvals_PS, eigvects_PS, H_PS, show=True)
SSIM_stat_col.append((idx, cc_SSIM, logcc_SSIM, *tuple(reg_coef_SSIM), *tuple(logreg_coef_SSIM), H_cc_SSIM, logH_cc_SSIM))
print("MSE - LPIPS comparison")
cc_MSE, logcc_MSE, reg_coef_MSE, logreg_coef_MSE = spectra_cmp(eigvals_MSE, eigvals_PS, show=True)
H_cc_MSE, logH_cc_MSE = Hessian_cmp(eigvals_MSE, eigvects_MSE, H_MSE, eigvals_PS, eigvects_PS, H_PS, show=True)
MSE_stat_col.append((idx, cc_MSE, logcc_MSE, *tuple(reg_coef_MSE), *tuple(logreg_coef_MSE), H_cc_MSE, logH_cc_MSE))
np.savez(join(savedir,"Hess_cmp_%03d.npz"%idx), **{"eva_PS":eigvals_PS, "evc_PS":eigvects_PS, "H_PS":H_PS,
"eva_SSIM":eigvals_SSIM, "evc_SSIM":eigvects_SSIM, "H_SSIM":H_SSIM,
"eva_MSE":eigvals_MSE, "evc_MSE":eigvects_MSE, "H_MSE":H_MSE,})
np.savez(join(savedir, "H_cmp_stat.npz"), MSE_stat=MSE_stat_col, SSIM_stat=SSIM_stat_col)
MSE_stat_tab = pd.DataFrame(MSE_stat_col, columns=["id", "cc", "logcc", "reg_slop", "reg_intcp", "reg_log_slop", "reg_log_intcp", "H_cc", "logH_cc"])
MSE_stat_tab.to_csv(join(savedir, "H_cmp_MSE_stat.csv"))
SSIM_stat_tab = pd.DataFrame(SSIM_stat_col, columns=["id", "cc", "logcc", "reg_slop", "reg_intcp", "reg_log_slop", "reg_log_intcp", "H_cc", "logH_cc"])
SSIM_stat_tab.to_csv(join(savedir, "H_cmp_SSIM_stat.csv"))
#%%
modelsnm = "Face256"
SGAN = loadStyleGAN2("ffhq-256-config-e-003810.pt", size=256,)
G = StyleGAN2_wrapper(SGAN, )
savedir = join(saveroot, "ImDist_cmp\\StyleGAN2\\Face256")
os.makedirs(savedir, exist_ok=True)
SSIM_stat_col = []
MSE_stat_col = []
for idx in range(100):
refvec = G.sample_vector(1, device="cuda")# 0.1 * torch.randn(1, 256)
eigvals_PS, eigvects_PS, H_PS = hessian_compute(G, refvec, ImDist, hessian_method="BP")
eigvals_SSIM, eigvects_SSIM, H_SSIM = hessian_compute(G, refvec, D, hessian_method="BP")
eigvals_MSE, eigvects_MSE, H_MSE = hessian_compute(G, refvec, MSE, hessian_method="BP")
#% eigvals_L1, eigvects_L1, H_L1 = hessian_compute(G, refvec, L1, hessian_method="BP")
print("SSIM - LPIPS comparison")
cc_SSIM, logcc_SSIM, reg_coef_SSIM, logreg_coef_SSIM = spectra_cmp(-eigvals_SSIM[::-1], eigvals_PS, show=True)
H_cc_SSIM, logH_cc_SSIM = Hessian_cmp(-eigvals_SSIM[::-1], eigvects_SSIM, -H_SSIM, eigvals_PS, eigvects_PS, H_PS, show=True)
SSIM_stat_col.append((idx, cc_SSIM, logcc_SSIM, *tuple(reg_coef_SSIM), *tuple(logreg_coef_SSIM), H_cc_SSIM, logH_cc_SSIM))
print("MSE - LPIPS comparison")
cc_MSE, logcc_MSE, reg_coef_MSE, logreg_coef_MSE = spectra_cmp(eigvals_MSE, eigvals_PS, show=True)
H_cc_MSE, logH_cc_MSE = Hessian_cmp(eigvals_MSE, eigvects_MSE, H_MSE, eigvals_PS, eigvects_PS, H_PS, show=True)
MSE_stat_col.append((idx, cc_MSE, logcc_MSE, *tuple(reg_coef_MSE), *tuple(logreg_coef_MSE), H_cc_MSE, logH_cc_MSE))
np.savez(join(savedir,"Hess_cmp_%03d.npz"%idx), **{"eva_PS":eigvals_PS, "evc_PS":eigvects_PS, "H_PS":H_PS,
"eva_SSIM":eigvals_SSIM, "evc_SSIM":eigvects_SSIM, "H_SSIM":H_SSIM,
"eva_MSE":eigvals_MSE, "evc_MSE":eigvects_MSE, "H_MSE":H_MSE,})
np.savez(join(savedir, "H_cmp_stat.npz"), MSE_stat=MSE_stat_col, SSIM_stat=SSIM_stat_col)
MSE_stat_tab = pd.DataFrame(MSE_stat_col, columns=["id", "cc", "logcc", "reg_slop", "reg_intcp", "reg_log_slop", "reg_log_intcp", "H_cc", "logH_cc"])
MSE_stat_tab.to_csv(join(savedir, "H_cmp_MSE_stat.csv"))
SSIM_stat_tab = | pd.DataFrame(SSIM_stat_col, columns=["id", "cc", "logcc", "reg_slop", "reg_intcp", "reg_log_slop", "reg_log_intcp", "H_cc", "logH_cc"]) | pandas.DataFrame |
"""
Author: <NAME>
"""
import numpy as np
import pandas as pd
class Naive_Bayes_Classifier():
def __init__(self):
#save the classes and their data
self.data_class={}
def fit(self,X_train,y_train):
def group_data_to_classes(data_class,X_train,y_train):
class0=True
class1=True
for i in range(y_train.shape[0]):
X_temp=X_train[i,:].reshape(X_train[i,:].shape[0],1)
if y_train[i]==0:
if class0==True:
data_class[0]=X_temp
class0=False
else:
data_class[0]=np.append(data_class[0],X_temp,axis=1)
elif y_train[i]==1:
if class1==True:
data_class[1]=X_temp
class1=False
else:
data_class[1]=np.append(data_class[1],X_temp,axis=1)
return data_class
#set the train set and target
self.X_train=X_train
self.y_train=y_train
#initialize data array
self.data_class[0]=np.array([[]])
self.data_class[1]=np.array([[]])
#find data and their classess
self.data_class=group_data_to_classes(self.data_class,self.X_train,self.y_train)
self.data_class[0]=self.data_class[0].T
self.data_class[1]=self.data_class[1].T
#calculate the means for the train set
self.mean_1=np.mean(self.data_class[0],axis=0)
self.mean_2=np.mean(self.data_class[1],axis=0)
#calculate the standard deviation for the train set
self.std_1=np.std(self.data_class[0],axis=0)
self.std_2=np.std(self.data_class[1],axis=0)
def predict(self, X_test):
"""
For numerical data modeled as a normal distribution,
we can use the Gaussian/normal distribution function to calculate likelihood
"""
def calc_posterior(X, X_train_class, mean_, std_):
def class_likelihood(x, mean, std):
#use the normal pdf to calculate the likelihood
                likelihood = (1.0/(std*np.sqrt(2*np.pi)))*np.exp(-(x-mean)**2/(2*std**2))
                return likelihood
#product of class likelihoods for all features in the data
likelihood_prod = np.prod(class_likelihood(X,mean_,std_),axis=1)
#class prior
prior = X_train_class.shape[0]/self.X_train.shape[0]
#class posterior distribution
posterior=likelihood_prod*prior
return posterior
#class 0 posterior
class_0=calc_posterior(X_test,self.data_class[0],self.mean_1,self.std_1)
#class 1 posterior
class_1=calc_posterior(X_test,self.data_class[1],self.mean_2,self.std_2)
#find the class that each data row belongs to
y_pred =[]
for i, j in zip(class_0, class_1):
if (i > j):
y_pred.append(0)
else:
y_pred.append(1)
#store data to a dataframe to return
results = | pd.DataFrame() | pandas.DataFrame |
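# Minimal usage sketch (added; toy data for illustration only):
#   X = np.array([[1.0, 2.0], [1.1, 1.9], [3.0, 4.0], [3.2, 4.1]])
#   y = np.array([0, 0, 1, 1])
#   clf = Naive_Bayes_Classifier()
#   clf.fit(X, y)
#   clf.predict(np.array([[1.05, 2.0]]))   # expected to assign class 0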
# -*- coding: UTF-8 -*-
"""
base class and functions to handle with hmp file and GWAS results
"""
import re
import sys
import numpy as np
import pandas as pd
import os.path as op
from tqdm import tqdm
from pathlib import Path
from subprocess import call
from collections import Counter
from schnablelab.apps.base import ActionDispatcher, OptionParser, put2slurm
plink = op.abspath(op.dirname(__file__)) + '/../apps/plink'
GEC = op.abspath(op.dirname(__file__)) + '/../apps/gec.jar'
def main():
actions = (
('FilterMissing', 'filter out SNPs with high missing rate'),
('FilterMAF', 'filter out SNP with extremely low minor allele frequency'),
('FilterHetero', 'filter out SNPs with high heterozygous rates'),
('SubsamplingSNPs', 'grep a subset of specified SNPs from a hmp file'),
('DownsamplingSNPs', 'pick up some SNPs from a huge hmp file using Linux sed command'),
('SubsamplingSMs', 'grep a subset of samples from a hmp file'),
('hmp2ped', 'convert hmp file to plink map and ped file'),
('ped2bed', 'convert plink ped format to binary bed format'),
('IndePvalue', 'estimate the number of independent SNPs using GEC'),
('hmpSingle2Double', 'convert single hmp to double type hmp'),
('Info', 'get basic info for a hmp file'),
('MAFs', 'calculate the MAF for all/specified SNPs in hmp'),
('sortHmp', 'Sort hmp file based on chromosome order and position'),
('reheader', 'edit sample names in header only'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
# N:missing, -:gap
geno_one2two = {
'A':'AA', 'C':'CC', 'G':'GG', 'T':'TT',
'R':'AG', 'Y':'CT', 'S':'GC', 'W':'AT', 'K':'GT', 'M':'AC',
'N':'NN', '-':'--'}
geno_two2one = {
'AA': 'A', 'CC': 'C', 'GG': 'G', 'TT': 'T',
'GA': 'R', 'AG': 'R', 'TC': 'Y', 'CT': 'Y',
'CG': 'S', 'GC': 'S', 'TA': 'W', 'AT': 'W',
'TG': 'K', 'GT': 'K', 'CA': 'M', 'AC': 'M',
'NN': 'N', '--': '-'}
def sortchr(x):
'''
criteria to sort chromosome names
'''
x1 = re.findall(r'\d+$', x)
if len(x1)==1:
return int(x1[0])
else:
sys.exit('check chromosome name!')
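# Example (added): sorted(['Chr10', 'Chr2', 'Chr1'], key=sortchr)
# -> ['Chr1', 'Chr2', 'Chr10']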
class ParseHmp():
'''
parse hmp file
'''
def __init__(self, filename):
'''
args:
filename: hmp file name
type: hmp format. double or single
'''
self.fn = filename
with open(filename) as f:
headerline = f.readline()
SMs_header = headerline.split()[:11]
SMs = headerline.split()[11:]
numSMs = len(SMs)
firstgeno = f.readline().split()[11]
type = 'single' if len(firstgeno)==1 else 'double'
#print('guess hmp type: %s'%type)
numSNPs = sum(1 for _ in f)
dtype_dict = {i:'str' for i in headerline.split()}
dtype_dict['pos'] = np.int64
self.headerline = headerline
self.SMs_header = SMs_header
self.SMs = SMs
self.numSMs = numSMs
self.numSNPs = numSNPs+1
self.type = type
self.dtype_dict = dtype_dict
def AsDataframe(self, needsort=False):
'''
args:
needsort: if hmp need to be sorted
'''
df = | pd.read_csv(self.fn, delim_whitespace=True, dtype=self.dtype_dict) | pandas.read_csv |
# Author: <NAME>, PhD
# University of Los Angeles California
import os
import sys
import re
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy import optimize
from itertools import groupby
# from https://gist.github.com/walkermatt/2871026
from threading import Timer
def debounce(wait):
""" Decorator that will postpone a functions
execution until after wait seconds
have elapsed since the last time it was invoked. """
def decorator(fn):
def debounced(*args, **kwargs):
def call_it():
fn(*args, **kwargs)
try:
debounced.t.cancel()
except(AttributeError):
pass
debounced.t = Timer(wait, call_it)
debounced.t.start()
return debounced
return decorator
#############################################################################
# --------- Natural Abundance Correction CLASS -----------------------------#
#############################################################################
class NAProcess:
# Adapted from IsoCor code (https://github.com/MetaSys-LISBP/IsoCor)
##################
## Init and setup
##################
def __init__(self, entry, atomTracer="H", purityTracer=[0, 1], FAMES=True, CHOL=False):
self.NaturalAbundanceDistributions = self.__getNaturalAbundanceDistributions()
self.formula = self.getFAFormulaString(entry, FAMES, CHOL)
self.elementsDict = self.parseFormula(self.formula)
self.atomTracer = atomTracer
self.purityTracer = purityTracer
self.correctionMatrix = self.computeCorrectionMatrix(self.elementsDict, self.atomTracer, self.NaturalAbundanceDistributions, purityTracer)
def getFAFormulaString(self, entry, FAMES, CHOL=False):
''' Return formula string e.g.: C3H2O3'''
regex = "C([0-9]+):([0-9]+)"
carbon,doubleBond = [int(val) for val in re.findall(regex, entry)[0]]
hydrogen = 3+(carbon-2)*2+1-2*doubleBond
oxygen = 2
silicon = 0
if (FAMES):
carbon=carbon+1
hydrogen=hydrogen-1+3
if (CHOL):
carbon, hydrogen, oxygen, silicon = 30, 54, 1, 1
return "".join(["".join([letter,str(n)]) for [letter,n] in [
["C", carbon],
["H", hydrogen],
["Si", silicon],
["O", oxygen]] if n>0])
def parseFormula(self, formula):
"""
Parse the elemental formula and return the number
        of each element in a dictionary d={'El_1':x,'El_2':y,...}.
"""
regex = f"({'|'.join(self.NaturalAbundanceDistributions.keys())})([0-9]{{0,}})"
elementDict = dict((element, 0) for element in self.NaturalAbundanceDistributions.keys())
for element,n in re.findall(regex, formula):
if n:
elementDict[element] += int(n)
else:
elementDict[element] += 1
return elementDict
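    # Example (added): parseFormula("C17H34O2") ->
    # {'H': 34, 'C': 17, 'N': 0, 'O': 2, 'Si': 0, 'S': 0}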
def __getNaturalAbundanceDistributions(self):
'''Return a dictionary of the isotopic proportions at natural abundance
        described in https://www.ncbi.nlm.nih.gov/pubmed/27989585'''
H1, H2 = 0.999885, 0.000115
C12, C13 = 0.9893, 0.0107
N14, N15 = 0.99632, 0.00368
O16, O17, O18 = 0.99757, 0.00038, 0.00205
Si28, Si29, Si30 = 0.922297, 0.046832, 0.030872
S32, S33, S34, S36 = 0.9493, 0.0076, 0.0429, 0.0002
return {'H': np.array([H1, H2]), # hydrogen
'C': np.array([C12, C13]), # carbon
'N': np.array([N14, N15]), # nitrogen
'O': np.array([O16, O17, O18]), # oxygen
'Si': np.array([Si28, Si29, Si30]), # silicon
'S': np.array([S32, S33, S34, S36])} # sulphur
def __calculateMassDistributionVector(self, elementDict, atomTracer, NADistributions):
"""
Calculate a mass distribution vector (at natural abundancy),
based on the elemental compositions of metabolite.
The element corresponding to the isotopic tracer is not taken
into account in the metabolite moiety.
"""
result = np.array([1.])
for atom,n in elementDict.items():
if atom not in [atomTracer]:
for i in range(n):
result = np.convolve(result, NADistributions[atom])
return result
def computeCorrectionMatrix(self, elementDict, atomTracer, NADistributions, purityTracer):
# calculate correction vector used for correction matrix construction
# it corresponds to the mdv at natural abundance of all elements except the
# isotopic tracer
correctionVector = self.__calculateMassDistributionVector(elementDict, atomTracer, NADistributions)
# check if the isotopic tracer is present in formula
try:
nAtomTracer = elementDict[atomTracer]
except:
print("The isotopic tracer must to be present in the metabolite formula!")
tracerNADistribution = NADistributions[atomTracer]
m = 1+nAtomTracer*(len(tracerNADistribution)-1)
c = len(correctionVector)
if m > c + nAtomTracer*(len(tracerNADistribution)-1):
print("There might be a problem in matrix size.\nFragment does not contains enough atoms to generate this isotopic cluster.")
if c < m:
            # pad with zeros
correctionVector.resize(m)
# create correction matrix
correctionMatrix = np.zeros((m, nAtomTracer+1))
for i in range(nAtomTracer+1):
column = correctionVector[:m]
for na in range(i):
column = np.convolve(column, purityTracer)[:m]
for nb in range(nAtomTracer-i):
column = np.convolve(column, tracerNADistribution)[:m]
correctionMatrix[:,i] = column
return correctionMatrix
##################
## Data processing
##################
def _computeCost(self, currentMID, target, correctionMatrix):
"""
Cost function used for BFGS minimization.
return : (sum(target - correctionMatrix * currentMID)^2, gradient)
"""
difference = target - np.dot(correctionMatrix, currentMID)
# calculate sum of square differences and gradient
return (np.dot(difference, difference), np.dot(correctionMatrix.transpose(), difference)*-2)
def _minimizeCost(self, args):
'''
Wrapper to perform least-squares optimization via the limited-memory
Broyden-Fletcher-Goldfarb-Shanno algorithm, with an explicit lower boundary
set to zero to eliminate any potential negative fractions.
'''
costFunction, initialMID, target, correctionMatrix = args
res = optimize.minimize(costFunction, initialMID, jac=True, args=(target, correctionMatrix),
method='L-BFGS-B', bounds=[(0., float('inf'))]*len(initialMID),
options={'gtol': 1e-10, 'eps': 1e-08, 'maxiter': 15000, 'ftol': 2.220446049250313e-09,
'maxcor': 10, 'maxfun': 15000})
return res
def correctForNaturalAbundance(self, dataFrame, method="LSC"):
'''
Correct the Mass Isotope Distributions (MID) from a given dataFrame.
Method: SMC (skewed Matrix correction) / LSC (Least Squares Skewed Correction)
'''
correctionMatrix = self.computeCorrectionMatrix(self.elementsDict, self.atomTracer, self.NaturalAbundanceDistributions, self.purityTracer)
nRows, nCols = correctionMatrix.shape
# ensure compatible sizes (will extend data)
if nCols<dataFrame.shape[1]:
print("The measure MID has more clusters than the correction matrix.")
else:
dfData = np.zeros((len(dataFrame), nCols))
dfData[:dataFrame.shape[0], :dataFrame.shape[1]] = dataFrame.values
if method == "SMC":
                # will multiply the data by the inverse of the correction matrix
correctionMatrix = np.linalg.pinv(correctionMatrix)
correctedData = np.matmul(dfData, correctionMatrix.transpose())
# flatten unrealistic negative values to zero
correctedData[correctedData<0] = 0
elif method == "LSC":
# Prepare multiprocessing optimization
targetMIDList = dfData.tolist()
initialMID = np.zeros_like(targetMIDList[0])
argsList = [(self._computeCost, initialMID, targetMID, correctionMatrix) for targetMID in targetMIDList]
# minimize for each MID
allRes = [self._minimizeCost(args) for args in argsList]
correctedData = np.vstack([res.x for res in allRes])
return pd.DataFrame(columns=dataFrame.columns, data=correctedData[:, :dataFrame.shape[1]])
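# Usage sketch (added for illustration; the measured distribution below is made up):
# build a corrector for one parental ion and clean its measured mass isotopomer
# distribution, whose columns are ordered M.0, M.1, ... by increasing mass.
def _naprocess_usage_example():
    measured = pd.DataFrame({"M.0": [0.90, 0.88],
                             "M.1": [0.07, 0.08],
                             "M.2": [0.03, 0.04]})
    corrector = NAProcess("C16:0", atomTracer="H", purityTracer=[0.0, 1.0], FAMES=True)
    return corrector.correctForNaturalAbundance(measured, method="LSC")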
#############################################################################
# --------- DATA OBJECT CLASS ----------------------------------------------#
#############################################################################
class MSDataContainer:
##################
## Init and setup
##################
def __init__(self, fileNames, internalRef="C19:0", tracer="H", tracerPurity=[0.00, 1.00]):
assert len(fileNames)==2 , "You must choose 2 files!"
self.internalRef = internalRef
self._cholesterol = False
self.tracer = tracer
self.tracerPurity = tracerPurity
self.NACMethod = "LSC" # least squares skewed matrix correction
self.dataFileName, self.templateFileName = self.__getDataAndTemplateFileNames(fileNames)
self._baseFileName = os.path.basename(self.dataFileName).split('.')[0]
self.pathDirName = os.path.dirname(self.dataFileName)
self.__regexExpression = {"Samples": '^(?!neg|S\d+$)',
"colNames": '(\d+)_(\d+)(?:\.\d+)?_(\d+)'}
self.dataDf = self._computeFileAttributes()
self.__standardDf_template = self.__getStandardsTemplateDf()
self.volumeMixTotal = 500
self.volumeMixForPrep = 100
self.volumeStandards = [1, 5, 10, 20, 40, 80]
self.standardDf_nMoles = self.computeStandardMoles()
# for normalization
# volume (uL) in which the original sample was diluted
samplesLoc = self.dataDf.SampleName.str.match(self.__regexExpression["Samples"], na=False)
self.numberOfSamples = len(self.dataDf.loc[samplesLoc])
self.volumesOfDilution = [750]*self.numberOfSamples
# volume (uL) of sample used in MS
self.volumesOfSampleSoupUsed = [5]*self.numberOfSamples
self.weightNormalization = False
def __getDataAndTemplateFileNames(self, fileNames, templateKeyword="template"):
'''Classify files (data or template) based on fileName'''
dataFileName = [fileName for fileName in fileNames if templateKeyword not in fileName][0]
templateFileName = [fileName for fileName in fileNames if fileName != dataFileName][0]
return [dataFileName, templateFileName]
def __parseIon(self, ion):
ionID, ionMass, ionDescription = re.findall(self.__regexExpression["colNames"], ion)[0]
return {"id": int(ionID), "mass": int(ionMass), "description": ionDescription}
def __parseSampleColumns(self, columnNames):
# indexed ions from columns
ions = map(lambda name: self.__parseIon(name), columnNames)
return list(ions)#[self.__getIndexedIon(ion, i) for i,ion in enumerate(ions)]
def __isLabeledExperiment(self, ionsDetected):
        # if more than 40% of the ions are duplicates, it probably means that the file is
        # from a labeled experiment (lots of fragments for each ion)
ionsList = list(map(lambda ion: ion["description"], ionsDetected))
uniqueIons = set(ionsList)
return len(uniqueIons)/len(ionsList) < 0.6
def __getIonParentedGroups(self, ionsDetected):
# groupby parental ions (save initial index for sorting later)
groupedIons = groupby(enumerate(ionsDetected), key=lambda ion: ion[1]['description'])
groupsIntraSorted = list(map(lambda group: (group[0], sorted(group[1], key=lambda ion: ion[1]['mass'])), groupedIons))
# split groups if most abundant ion present
finalGroups = []
for key,group in groupsIntraSorted:
# only process groups that have more than 1 ion
if len(group) != 1:
masses = np.array([ion[1]["mass"] for ion in group])
differences = masses[1:]-masses[0:-1]
idx = np.where(differences != 1)
# and that have non unitary jumps in differences from ion to ion
if len(idx[0])>0:
start = 0
for i in range(len(idx[0])+1):
if i < len(idx[0]):
end = idx[0][i]+1
subgroup = group[start:end]
start = idx[0][i] + 1
finalGroups.append((f"{key}-{i}", subgroup))
else:
subgroup = group[start:]
finalGroups.append((f"{key}-{i}", subgroup))
else:
finalGroups.append((key, group))
else:
finalGroups.append((key, group))
return finalGroups
def _computeFileAttributes(self):
# extract columns
columnsOfInterest = pd.read_excel(self.dataFileName, nrows=2).filter(regex=self.__regexExpression["colNames"]).columns
# load data and template files and isolate data part
df = pd.read_excel(self.dataFileName, skiprows=1)
templateMap = pd.read_excel(self.templateFileName, sheet_name="MAP")
# check if cholesterol experiment
letter = df["Name"][0][0] # F or C
df_Meta,df_Data = self.__getOrderedDfBasedOnTemplate(df, templateMap, letter)
if letter == "C":
self._cholesterol = True
# assign columns names
ionsDetected = self.__parseSampleColumns(columnsOfInterest)
self.dataColNames = [f"C{ion['description'][:2]}:{ion['description'][2:]} ({ion['mass']})" for ion in ionsDetected]
self.internalRefList = self.dataColNames
self.experimentType = "Not Labeled"
# Check if this is a labeled experiment.
# If it is, need to rework the columns names by adding info of non parental ion
if self.__isLabeledExperiment(ionsDetected):
self.experimentType = "Labeled"
# split groups if most abundant ion present
finalGroups = self.__getIonParentedGroups(ionsDetected)
if letter == "F":
startM = 0
else:
assert len(finalGroups) == 2, "For cholesterol experiment we only expect 2 parental ions!"
startM = -2
sortedIonNames = [(idx, f"C{ion['description'][:2]}:{ion['description'][2:]} ({group[0][1]['mass']}) M.{n}") for (key,group) in finalGroups for n,(idx, ion) in enumerate(group)]
orderedIdx,orderedIonNames = zip(*sortedIonNames)
# reorder the columns by ions
df_Data = df_Data.iloc[:, list(orderedIdx)]
self.dataColNames = orderedIonNames
# only parental ions for internalRefList
self.internalRefList = [ f"C{carbon.split('-')[0][:2]}:{carbon.split('-')[0][2:]} ({group[0][1]['mass']})" for (carbon, group) in finalGroups]
df_Data.columns = self.dataColNames
# get sample meta info from template file
df_TemplateInfo = self.__getExperimentMetaInfoFromMAP(templateMap)
assert len(df_TemplateInfo)==len(df_Data), \
f"The number of declared samples in the template (n={len(df_TemplateInfo)}) does not match the number of samples detected in the data file (n={len(df_Data)})"
# save the number of columns (meta info) before the actual data
self._dataStartIdx = len(df_Meta.columns)+len(df_TemplateInfo.columns)
if (letter == "F") | (self.experimentType == "Not Labeled"):
dataDf = pd.concat([df_Meta, df_TemplateInfo, df_Data.fillna(0)], axis=1)
else:
# if chol experiment, remove the M.-2 and M.-1
dataDf = pd.concat([df_Meta, df_TemplateInfo, df_Data.iloc[:, 2:].fillna(0)], axis=1)
# but save a copy with everything for posterity
self.dataDf_chol = pd.concat([df_Meta, df_TemplateInfo, df_Data.fillna(0)], axis=1)
return dataDf
def __getOrderedDfBasedOnTemplate(self, df, templateMap, letter="F", skipCols=7):
'''Get new df_Data and df_Meta based on template'''
# reorder rows based on template and reindex with range
newOrder = list(map(lambda x: f"{letter}{x.split('_')[1]}", templateMap.SampleID[templateMap.SampleName.dropna().index].values))[:len(df)]
df.index=df["Name"]
df = df.reindex(newOrder)
df.index = list(range(len(df)))
df_Meta = df[["Name", "Data File"]]
df_Data = df.iloc[:, skipCols:] # 7 first cols are info
return df_Meta, df_Data
def __getExperimentMetaInfoFromMAP(self, templateMap):
'''Return the meta info of the experiment'''
# keep only rows with declared names
declaredIdx = templateMap.SampleName.dropna().index
templateMap = templateMap.loc[declaredIdx]
templateMap.index = range(len(templateMap)) # in case there were missing values
# fill in missing weights with 1
templateMap.loc[templateMap.SampleWeight.isna(), "SampleWeight"]=1
return templateMap[["SampleID", "SampleName", "SampleWeight", "Comments"]]
def __getStandardsTemplateDf(self, sheetKeyword="STANDARD"):
'''Loads the correct sheet for standard and returns it'''
sheetName = f"{sheetKeyword}_{'_'.join(self.experimentType.upper().split(' '))}"
templateStandard = pd.read_excel(self.templateFileName, sheet_name=sheetName)
return templateStandard
def __makeResultFolder(self):
if self._cholesterol:
suffix = "-CHOL"
else:
suffix = ""
directory = f"{self.pathDirName}/results-{self._baseFileName}{suffix}"
if not os.path.exists(directory):
os.mkdir(directory)
return directory
##################
## Analysis and Updates
##################
def updateInternalRef(self, newInternalRef):
'''Update FAMES chosen as internal reference and normalize data to it'''
print(f"Internal Reference changed from {self.internalRef} to {newInternalRef}")
self.internalRef = newInternalRef
self.dataDf_norm = self.computeNormalizedData()
def updateStandards(self, volumeMixForPrep, volumeMixTotal, volumeStandards):
self.volumeMixForPrep = volumeMixForPrep
self.volumeMixTotal = volumeMixTotal
self.volumeStandards = volumeStandards
self.standardDf_nMoles = self.computeStandardMoles()
def computeStandardMoles(self):
'''Calculate nMoles for the standards'''
template = self.__standardDf_template.copy()
template["Conc in Master Mix (ug/ul)"] = template["Stock conc (ug/ul)"]*template["Weight (%)"]/100*self.volumeMixForPrep/self.volumeMixTotal
# concentration of each carbon per standard volume
for ul in self.volumeStandards:
template[f"Std-Conc-{ul}"]=ul*(template["Conc in Master Mix (ug/ul)"]+template["Extra"])
# nMol of each FAMES per standard vol
for ul in self.volumeStandards:
template[f"Std-nMol-{ul}"] = 1000*template[f"Std-Conc-{ul}"]/template["MW"]
# create a clean template with only masses and carbon name
templateClean = pd.concat([template.Chain, template.filter(like="Std-nMol")], axis=1).transpose()
templateClean.columns = [f"C{chain} ({int(mass)})" for chain,mass in zip(self.__standardDf_template.Chain, self.__standardDf_template.MW)]
templateClean = templateClean.iloc[1:]
return templateClean
def getStandardAbsorbance(self):
'''Get normalized absorbance data for standards'''
matchedLocations = self.dataDf_norm.SampleName.str.match('S[0-9]+', na=False)
return self.dataDf_norm.loc[matchedLocations]
def updateTracer(self, newTracer):
self.tracer = newTracer
print(f"The tracer has been updated to {newTracer}")
self.computeNACorrectionDf()
def updateTracerPurity(self, newPurity):
self.tracerPurity = newPurity
self.computeNACorrectionDf()
def updateNACMethod(self, newMethod):
self.NACMethod = newMethod
print(f"The correction method for natural abundance has been updated to {newMethod}")
self.computeNACorrectionDf()
def updateVolumesOfSampleDilution(self, newVolumeOfDilution, newVolumeOfSampleUsed, useValueDilution=True, useValueSample=True):
if useValueDilution:
self.volumesOfDilution = [newVolumeOfDilution]*self.numberOfSamples
if useValueSample:
self.volumesOfSampleSoupUsed = [newVolumeOfSampleUsed]*self.numberOfSamples
print(f"The volumes used for normalization have been updated:\n\tVolume of dilution: {self.volumesOfDilution}\n\tVolume of sample used: {self.volumesOfSampleSoupUsed}")
def updateVolumeOfDilutionFromTemplateFile(self, columnName, activated, variable="dilution", backupValueDilution=750, backupValueSample=5, useBackupDilution=True, useBackupSample=True):
templateMap = pd.read_excel(self.templateFileName, sheet_name="MAP")
declaredIdx = templateMap.SampleName.dropna()[templateMap.SampleName.dropna().str.match(self.__regexExpression["Samples"], na=False)].index
if ((variable == "dilution") & (activated)):
self.volumesOfDilution = templateMap.loc[declaredIdx, columnName].values
print(f"The dilution volumes used for normalization have been updated from template to {self.volumesOfDilution}")
assert len(self.volumesOfDilution[~np.isnan(self.volumesOfDilution)])==len(declaredIdx),\
f"The number of volume of dilutions declared in the Template file (n={len(self.volumesOfDilution[~np.isnan(self.volumesOfDilution)])}) is different than the number of samples declared (n={len(declaredIdx)})"
elif ((variable == "sample") & (activated)):
self.volumesOfSampleSoupUsed = templateMap.loc[declaredIdx, columnName].values
print(f"The sample volumes used for normalization have been updated from template to {self.volumesOfSampleSoupUsed}")
assert len(self.volumesOfSampleSoupUsed[~np.isnan(self.volumesOfSampleSoupUsed)])==len(declaredIdx),\
f"The number of sample volumes declared in the Template file (n={len(self.volumesOfSampleSoupUsed[~np.isnan(self.volumesOfSampleSoupUsed)])}) is different than the number of samples declared (n={len(declaredIdx)})"
else:
self.updateVolumesOfSampleDilution(backupValueDilution, backupValueSample, useBackupDilution, useBackupSample)
def updateNormalizationType(self, newType):
self.weightNormalization = bool(newType)
if (self.weightNormalization):
typeNorm = "by total weight"
else:
typeNorm = "by relative weight"
print(f"The normalization when computing the data has been changed to '{typeNorm}'")
# Debouncing active for computing the NA correction.
    # Makes for a smoother user experience (no lagging) when ion purities are changed in the textbox
# Note that this means that the function will be called with the specified delay after parameters are changed
@debounce(1.1)
def computeNACorrectionDf(self):
self.dataDf_corrected = self.correctForNaturalAbundance()
self.dataDf_labeledProportions = self.calculateLabeledProportionForAll()
def computeNormalizedData(self):
'''Normalize the data to the internal ref'''
if self.experimentType == "Not Labeled":
dataDf_norm = self.dataDf.copy()
dataDf_norm.iloc[:, self._dataStartIdx:] = dataDf_norm.iloc[:, self._dataStartIdx:].divide(dataDf_norm[self.internalRef], axis=0)
else:
sumFracDf = self.calculateSumIonsForAll()
sumFracDf = sumFracDf.divide(sumFracDf[self.internalRef], axis=0)
# sumFracDf = pd.DataFrame(columns=sumFracDf.columns, data=sumFracDf.values/sumFracDf[self.internalRef].values[:, np.newaxis])
dataDf_norm = pd.concat([self.dataDf.iloc[:, :self._dataStartIdx], sumFracDf], axis=1)
return dataDf_norm
def correctForNaturalAbundance(self):
'''Correct all the data for natural abundance'''
correctedData = self.dataDf.iloc[:, :self._dataStartIdx]
for parentalIon in self.internalRefList:
ionMID = self.dataDf.filter(like=parentalIon)
if ionMID.shape[1]<=1:
# no clusters, go to next
print(parentalIon, "doesn't have non parental ions")
correctedData = pd.concat([correctedData, ionMID], axis=1)
continue
ionNA = NAProcess(parentalIon, self.tracer, purityTracer=self.tracerPurity, CHOL=self._cholesterol)
correctedIonData = ionNA.correctForNaturalAbundance(ionMID, method=self.NACMethod)
correctedData = pd.concat([correctedData, correctedIonData], axis=1)
print(f"The MIDs have been corrected using the {self.NACMethod} method (tracer: {self.tracer}, purity: {self.tracerPurity})")
return correctedData
def calculateSumIonsForAll(self):
'''Return df of the summed fractions for all the ions'''
if self._cholesterol:
dataToSum = self.dataDf_chol
else:
dataToSum = self.dataDf
sumFrac = pd.concat([dataToSum.filter(like=parentalIon).sum(axis=1) for parentalIon in self.internalRefList], axis=1)
sumFrac.columns = self.internalRefList
return sumFrac
def calculateLabeledProportion(self, df):
'''Calculate the proportion of labeling in non-parental ions (M.0 must be first column).'''
total = df.sum(axis=1)
return (total - df.iloc[:,0])/total
def calculateLabeledProportionForAll(self):
'''Return dataFrame of the labeling proportions for all the ions'''
proportions = pd.concat([self.calculateLabeledProportion(self.dataDf_corrected.filter(like=parentalIon)) for parentalIon in self.internalRefList], axis=1)
proportions.columns = self.internalRefList
return pd.concat([self.dataDf.iloc[:, :self._dataStartIdx], proportions], axis=1)
def saveStandardCurvesAndResults(self, useMask=False):
'''Save figures and all the relevant data'''
# get current folder and create result folder if needed
savePath = self.__makeResultFolder()
if not useMask:
extension = ""
else:
extension = "_modified"
self.dataDf_quantification = self.computeQuantificationFromStandardFits(useMask=useMask)
# index of data of interest (not the standards)
expDataLoc = self.dataDf_quantification.SampleName.str.match(self.__regexExpression["Samples"], na=False)
stdAbsorbance = self.getStandardAbsorbance().iloc[:, self._dataStartIdx:]
quantificationDf = self.dataDf_quantification.iloc[:, 3:]
nTotal = len(quantificationDf.columns)
# grid of plots
nCols = 4
if nTotal%4==0:
nRows = int(nTotal/nCols)
else:
nRows = int(np.floor(nTotal/nCols)+1)
# fig1 (only standards) and fig2 (standards + calculated FAMES concentration)
fig1,axes1 = plt.subplots(ncols=nCols, nrows=nRows, figsize=(20, nRows*4), constrained_layout=True)
fig2,axes2 = plt.subplots(ncols=nCols, nrows=nRows, figsize=(20, nRows*4), constrained_layout=True)
fig1.suptitle(f"Experiment: {self._baseFileName}")
fig2.suptitle(f"Experiment: {self._baseFileName}")
for i,(col,ax1,ax2) in enumerate(zip(quantificationDf.columns, axes1.flatten(), axes2.flatten())):
# if slope/intercept from standard are present for this ion, then continue
slope,intercept,r2 = self.standardDf_fitResults.loc[["slope", "intercept", "R2"], col]
# standards values and fits
try:
xvals = self.standardDf_nMoles[col].values
except:
# if error, it means that parental ion data were used
parentalIon = self._checkIfParentalIonDataExistsFor(col)[1]
xvals = self.standardDf_nMoles[parentalIon].values
yvals = stdAbsorbance[col].values
try:
mask = self._maskFAMES[col]["newMask"]
except:
mask = self._maskFAMES[col]["originalMask"]
xfit = [np.min(xvals), np.max(xvals)]
yfit = np.polyval([slope, intercept], xfit)
# Fig 1
ax1.plot(xvals[mask], yvals[mask], "o", color="#00BFFF")
ax1.plot(xvals[[not i for i in mask]], yvals[[not i for i in mask]], "o", mfc="none", color="black", mew=2)
ax1.plot(xfit, yfit, "-", color="#fb4c52")
ax1.text(ax1.get_xlim()[0]+(ax1.get_xlim()[1]-ax1.get_xlim()[0])*0.05, ax1.get_ylim()[0]+(ax1.get_ylim()[1]-ax1.get_ylim()[0])*0.9, f"R2={r2:.4f}", size=14, color="#ce4ad0")
ax1.text(ax1.get_xlim()[0]+(ax1.get_xlim()[1]-ax1.get_xlim()[0])*0.97, ax1.get_ylim()[0]+(ax1.get_ylim()[1]-ax1.get_ylim()[0])*0.05, f"y={slope:.4f}x+{intercept:.4f}", size=14, color="#fb4c52", ha="right")
ax1.set_title(col)
ax1.set_xlabel("Quantity (nMoles)")
ax1.set_ylabel("Absorbance")
# Fig 2
ax2.plot(xvals[mask], yvals[mask], "o", color="#00BFFF")
ax2.plot(xvals[[not i for i in mask]], yvals[[not i for i in mask]], "x", color="black", ms=3)
ax2.plot(xfit, yfit, "-", color="#fb4c52")
# add values calculated from curve (visually adjust for normalization by weight done above)
ax2.plot(quantificationDf.loc[expDataLoc, col], self.dataDf_norm.loc[expDataLoc, col], "o", color="#FF8B22", alpha=0.3)
ax2.text(ax2.get_xlim()[0]+(ax2.get_xlim()[1]-ax2.get_xlim()[0])*0.05, ax2.get_ylim()[0]+(ax2.get_ylim()[1]-ax2.get_ylim()[0])*0.9, f"R2={r2:.4f}", size=14, color="#ce4ad0")
ax2.text(ax2.get_xlim()[0]+(ax2.get_xlim()[1]-ax2.get_xlim()[0])*0.97, ax2.get_ylim()[0]+(ax2.get_ylim()[1]-ax2.get_ylim()[0])*0.05, f"y={slope:.4f}x+{intercept:.4f}", size=14, color="#fb4c52", ha="right")
ax2.set_title(col)
ax2.set_xlabel("Quantity (nMoles)")
ax2.set_ylabel("Absorbance")
#####################
# Save data and figures
# save figures
fig1.savefig(f"{savePath}/standard-fit{extension}.pdf")
fig2.savefig(f"{savePath}/standard-fit-with-data{extension}.pdf")
# close Matplotlib processes
plt.close('all')
# Create a Pandas Excel writer using XlsxWriter as the engine.
if self._cholesterol:
suffix = "-CHOL"
else:
suffix = ""
writer = pd.ExcelWriter(f"{savePath}/results-{self._baseFileName}{suffix}{extension}.xlsx", engine='xlsxwriter')
normalization = self.getNormalizationArray()
# Write each dataframe to a different worksheet.
# standards
standards = self.getConcatenatedStandardResults()
standards.to_excel(writer, sheet_name='Standards', index=True)
# data
self.dataDf_quantification.loc[expDataLoc].to_excel(writer, sheet_name='QuantTotal_nMoles', index=False)
resNorm = pd.concat([self.dataDf_norm["SampleID"], self.dataDf_norm["SampleName"], self.dataDf_norm["Comments"], quantificationDf.divide(normalization, axis=0)], axis=1)
resNorm.loc[expDataLoc].to_excel(writer, sheet_name='QuantTotal_nMoles_mg', index=False)
if self.experimentType == "Labeled":
newlySynthetizedMoles = quantificationDf*self.dataDf_labeledProportions[quantificationDf.columns]
res_newlySynthetizedMoles = pd.concat([self.dataDf_norm["SampleID"], self.dataDf_norm["SampleName"], self.dataDf_norm["Comments"], newlySynthetizedMoles], axis=1)
# uL of liver soup used = 5uL (the initial liver was diluted in 750)
res_newlySynthetizedMoles_norm = pd.concat([self.dataDf_norm["SampleID"], self.dataDf_norm["SampleName"], self.dataDf_norm["Comments"], newlySynthetizedMoles.divide(normalization, axis=0)], axis=1)
res_newlySynthetizedMoles.loc[expDataLoc].to_excel(writer, sheet_name='QuantSynthetized_nMoles', index=False)
res_newlySynthetizedMoles_norm.loc[expDataLoc].to_excel(writer, sheet_name='QuantSynthetized_nMoles_mg', index=False)
labeledProp = self.dataDf_labeledProportions[["SampleID", "SampleName", "Comments", *self.dataDf_labeledProportions.columns[self._dataStartIdx:]]]
labeledProp.loc[expDataLoc].to_excel(writer, sheet_name='PercentageSynthetized', index=False)
if (self._cholesterol) & (self.experimentType=="Labeled"):
originalData = self.dataDf_chol
else:
originalData = self.dataDf
originalData.to_excel(writer, sheet_name='OriginalData', index=False)
self.dataDf_norm.to_excel(writer, sheet_name='OriginalData_normToInternalRef', index=False)
# add a sheet with experiment log
nSamples = len(self.volumesOfDilution)
nVolumesStandards = len(self.volumeStandards)
# keep the lengthiest to use to extend other arrays
nMax = nSamples if (nSamples >= nVolumesStandards) else nVolumesStandards
log = {
"Experiment type": [self.experimentType] + [np.nan]*(nMax-1),
"Volume Mix Total": [self.volumeMixTotal] + [np.nan]*(nMax-1),
"Volume Mix Used": [self.volumeMixForPrep] + [np.nan]*(nMax-1),
"Internal Reference": [self.internalRef] + [np.nan]*(nMax-1),
"Volume standards": list(self.volumeStandards) + [np.nan]*(nMax-len(self.volumeStandards)),
"Volume of Dilution": list(self.volumesOfDilution) + [np.nan]*(nMax-len(self.volumesOfDilution)),
"Volume of Sample Measured": list(self.volumesOfSampleSoupUsed) + [np.nan]*(nMax-len(self.volumesOfSampleSoupUsed)),
"Normalization": ["Weigth only" if self.weightNormalization else "Relative Weight"] + [np.nan]*(nMax-1),
"Isotope tracer": [self.tracer] + [np.nan]*(nMax-1),
"Isotope tracer purity": list(self.tracerPurity) + [np.nan]*(nMax-len(self.tracerPurity))
}
        pd.DataFrame(log)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 12:04:39 2018
@author: saintlyvi
"""
import time
import pandas as pd
import numpy as np
from sklearn.cluster import MiniBatchKMeans, KMeans
import somoclu
from experiment.algorithms.cluster_prep import xBins, preprocessX, clusterStats, bestClusters, saveLabels, saveResults
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
This function applies the MiniBatchKmeans algorithm from sklearn on inputs X for range_n_clusters.
    If preprocessing is specified, X is normalised with sklearn.preprocessing.normalize()
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
dim = 0 #set dim to 0 to match SOM formating
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
for n_clust in range_n_clusters:
clusterer = MiniBatchKMeans(n_clusters=n_clust, random_state=10)
#train clustering algorithm
tic = time.time()
clusterer.fit(A)
cluster_labels = clusterer.predict(A)
toc = time.time()
## Calculate scores
cluster_stats = clusterStats({}, n_clust, A, cluster_labels,
preprocessing = preprocessing, transform = None,
tic = tic, toc = toc)
cluster_centroids = clusterer.cluster_centers_
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n_clust] = cluster_labels
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
stats = pd.concat([stats, best_stats], axis=0)
stats.reset_index(drop=True, inplace=True)
if save is True:
saveLabels(cluster_lbls, stats)
return stats, centroids, cluster_lbls
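# Usage sketch (added for illustration; the random matrix stands in for real input
# profiles and the call relies on the helper functions imported from cluster_prep):
def _kmeans_usage_example():
    X = pd.DataFrame(np.random.rand(200, 24))  # 200 profiles, 24 hourly values
    stats, centroids, cluster_lbls = kmeans(X, range_n_clusters=range(3, 6),
                                            top_lbls=3, preprocessing=None,
                                            bin_X=False, experiment_name=None)
    return stats, centroids, cluster_lbls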
def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):
"""
This function applies the self organising maps algorithm from somoclu on inputs X over square maps of range_n_dim.
    If preprocessing is specified, X is normalised with sklearn.preprocessing.normalize()
    If transform = 'kmeans', the KMeans algorithm from sklearn is applied to the SOM nodes and returns clusters
kwargs can be n_clusters = range(start, end, interval) OR list()
Returns cluster stats, cluster centroids and cluster labels.
"""
for dim in range_n_dim:
limit = int(np.sqrt(len(X)/20))
if dim > limit: #verify that number of nodes are sensible for size of input data
return print('Input size too small for map. Largest n should be ' + str(limit))
else:
pass
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'0-4000':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
for dim in range_n_dim:
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
nrow = ncol = dim
tic = time.time()
#train clustering algorithm
som = somoclu.Somoclu(nrow, ncol, compactsupport=False, maptype='planar')
som.train(A)
toc = time.time()
if transform == None:
n_clust = [0]
elif transform == 'kmeans':
if kwargs is None:
n_clust = [10]
else:
for key, value in kwargs.items(): #create list with number of clusters for kmeans
if key == 'n_clusters':
n_clust = value
else:
return('Cannot process this transform algorithm')
for n in n_clust:
if n == 0:
#create empty matrix the size of the SOM
m = np.arange(0, nrow*ncol, 1).reshape(nrow, ncol)
else:
clusterer = KMeans(n_clusters=n, random_state=10)
som.cluster(algorithm=clusterer)
m = som.clusters
                #get cluster of SOM node and assign to input vectors based on bmus
k = [m[som.bmus[i][1],som.bmus[i][0]] for i in range(0, len(som.bmus))]
c = pd.DataFrame(A).assign(cluster=k).groupby('cluster').mean()
#calculate scores
cluster_stats = clusterStats({}, n, A, cluster_labels = k, preprocessing = preprocessing,
transform = transform, tic = tic, toc = toc)
cluster_centroids = np.array(c)
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n] = k
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
            cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
from abc import ABC, abstractmethod
from enum import Enum, auto
from math import sqrt
from pathlib import Path
from typing import Callable, ClassVar, Dict, Optional, Tuple, Type
import pandas
from pandas import DataFrame, Series
from ..util.integrity import recursive_sha256
from .filetype import Csv, FileType
from .reader import CsvReader, Reader
class Activity(Enum):
WALKING = auto()
JOGGING = auto()
UPSTAIRS = auto()
DOWNSTAIRS = auto()
SITTING = auto()
STANDING = auto()
class Dataset(ABC):
ACTIVITY_COLUMN: ClassVar[str] = NotImplemented
ACTIVITIES: ClassVar[Dict[Activity, str]] = NotImplemented
COLUMNS: ClassVar[Dict[str, Reader.DataType]] = NotImplemented
FREQUENCY: ClassVar[int] = NotImplemented
TRIAL_COLUMN: ClassVar[str] = NotImplemented
SUBJECT_COLUMN: ClassVar[str] = NotImplemented
@classmethod
@abstractmethod
def generators(cls) -> Optional[Dict[str, Callable[[DataFrame], Series]]]:
raise NotImplementedError
@classmethod
def is_columns_valid(cls) -> bool:
generators = cls.generators()
if not generators:
return True
return all(gen_key in cls.COLUMNS.keys() for gen_key in generators.keys())
def __init__(self, path: Path):
if not self.is_columns_valid():
raise ValueError("All generator keys must be specified in column field")
self.path = path
@property
def hash(self) -> str:
return recursive_sha256(self.path)
@classmethod
def enumerate_activities(cls) -> Dict[str, int]:
label_list = list(cls.ACTIVITIES.values())
label_list.sort()
return {label: i for i, label in enumerate(label_list)}
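    # Example (added for illustration): for Wisdm.ACTIVITIES this returns the
    # alphabetically sorted mapping {'Downstairs': 0, 'Jogging': 1, 'Sitting': 2,
    # 'Standing': 3, 'Upstairs': 4, 'Walking': 5}.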
@abstractmethod
def read(self) -> DataFrame:
pass
class Wisdm(Dataset):
ACTIVITIES = {
Activity.WALKING: "Walking",
Activity.JOGGING: "Jogging",
Activity.UPSTAIRS: "Upstairs",
Activity.DOWNSTAIRS: "Downstairs",
Activity.SITTING: "Sitting",
Activity.STANDING: "Standing",
}
ACTIVITY_COLUMN = "activity"
TRIAL_COLUMN = "trial"
SUBJECT_COLUMN = "subject"
COLUMNS = {
"user": Reader.DataType.CATEGORY,
"activity": Reader.DataType.CATEGORY,
"timestamp": Reader.DataType.INT64,
"xaccel": Reader.DataType.FLOAT64,
"yaccel": Reader.DataType.FLOAT64,
"zaccel": Reader.DataType.FLOAT64,
"magnitude": Reader.DataType.FLOAT64,
"xaccel_norm": Reader.DataType.FLOAT64,
"yaccel_norm": Reader.DataType.FLOAT64,
"zaccel_norm": Reader.DataType.FLOAT64,
"magnitude_norm": Reader.DataType.FLOAT64,
}
FREQUENCY = 20
@classmethod
def generators(cls) -> Optional[Dict[str, Callable[[DataFrame], Series]]]:
def magnitude(df: DataFrame) -> Series:
xacc = df["xaccel"]
yacc = df["yaccel"]
zacc = df["zaccel"]
euclidean = (xacc ** 2 + yacc ** 2 + zacc ** 2) ** 0.5
return Series(abs(euclidean - 10))
def normalize(series: Series) -> Series:
return Series((series - series.mean()) / (series.max() - series.min()))
return {
"magnitude": magnitude,
"xaccel_norm": lambda df: normalize(df["xaccel"]),
"yaccel_norm": lambda df: normalize(df["yaccel"]),
"zaccel_norm": lambda df: normalize(df["zaccel"]),
"magnitude_norm": lambda df: normalize(magnitude(df)),
}
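    # Worked example (added for illustration) for the magnitude generator above:
    # a phone at rest reads roughly (x, y, z) = (0, 0, 9.8) m/s^2, so
    # sqrt(0 + 0 + 9.8**2) = 9.8 and magnitude = |9.8 - 10| = 0.2,
    # i.e. the constant gravity component is approximately removed.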
def __init__(self, path: Path) -> None:
Dataset.__init__(self, path)
def read(self) -> DataFrame:
reader = CsvReader(self.path)
return reader.read(self.COLUMNS)
class MotionSense(Dataset):
ACTIVITIES = {
Activity.WALKING: "wlk",
Activity.JOGGING: "jog",
Activity.UPSTAIRS: "ups",
Activity.DOWNSTAIRS: "dws",
Activity.SITTING: "sit",
Activity.STANDING: "std",
}
COLUMNS = {
"subject": Reader.DataType.INT64,
"trial": Reader.DataType.INT64,
"activity": Reader.DataType.CATEGORY,
"attitude.roll": Reader.DataType.FLOAT64,
"attitude.pitch": Reader.DataType.FLOAT64,
"attitude.yaw": Reader.DataType.FLOAT64,
"gravity.x": Reader.DataType.FLOAT64,
"gravity.y": Reader.DataType.FLOAT64,
"gravity.z": Reader.DataType.FLOAT64,
"rotationRate.x": Reader.DataType.FLOAT64,
"rotationRate.y": Reader.DataType.FLOAT64,
"rotationRate.z": Reader.DataType.FLOAT64,
"xrot_norm": Reader.DataType.FLOAT64,
"yrot_norm": Reader.DataType.FLOAT64,
"zrot_norm": Reader.DataType.FLOAT64,
"userAcceleration.x": Reader.DataType.FLOAT64,
"userAcceleration.y": Reader.DataType.FLOAT64,
"userAcceleration.z": Reader.DataType.FLOAT64,
"magnitude": Reader.DataType.FLOAT64,
"xaccel_norm": Reader.DataType.FLOAT64,
"yaccel_norm": Reader.DataType.FLOAT64,
"zaccel_norm": Reader.DataType.FLOAT64,
"magnitude_norm": Reader.DataType.FLOAT64,
"gravity.x.real": Reader.DataType.FLOAT64,
"gravity.y.real": Reader.DataType.FLOAT64,
"gravity.z.real": Reader.DataType.FLOAT64,
"userAcceleration.x.real": Reader.DataType.FLOAT64,
"userAcceleration.y.real": Reader.DataType.FLOAT64,
"userAcceleration.z.real": Reader.DataType.FLOAT64,
}
FREQUENCY = 50
ACTIVITY_COLUMN = "activity"
SUBJECT_COLUMN = "subject"
TRIAL_COLUMN = "trial"
@classmethod
def generators(cls) -> Dict[str, Callable[[DataFrame], Series]]:
def magnitude(df: DataFrame) -> Series:
xacc = df["userAcceleration.x"]
yacc = df["userAcceleration.y"]
zacc = df["userAcceleration.z"]
euclidean = (xacc ** 2 + yacc ** 2 + zacc ** 2) ** 0.5
return Series(euclidean)
def normalize(series: Series) -> Series:
return Series((series - series.mean()) / (series.max() - series.min()))
def ms_squarize(series: Series) -> Series:
return series.multiply(10)
return {
"magnitude": magnitude,
"xaccel_norm": lambda df: normalize(df["userAcceleration.x"]),
"yaccel_norm": lambda df: normalize(df["userAcceleration.y"]),
"zaccel_norm": lambda df: normalize(df["userAcceleration.z"]),
"xrot_norm": lambda df: normalize(df["rotationRate.x"]),
"yrot_norm": lambda df: normalize(df["rotationRate.y"]),
"zrot_norm": lambda df: normalize(df["rotationRate.z"]),
"magnitude_norm": lambda df: normalize(magnitude(df)),
"gravity.x.real": lambda df: ms_squarize(df["gravity.x"]),
"gravity.y.real": lambda df: ms_squarize(df["gravity.y"]),
"gravity.z.real": lambda df: ms_squarize(df["gravity.z"]),
"userAcceleration.x.real": lambda df: ms_squarize(df["userAcceleration.x"]),
"userAcceleration.y.real": lambda df: ms_squarize(df["userAcceleration.y"]),
"userAcceleration.z.real": lambda df: ms_squarize(df["userAcceleration.z"]),
}
def __init__(self, path: Path) -> None:
Dataset.__init__(self, path)
def read(self) -> DataFrame:
pandas_columns = {
name: type_enum.value for name, type_enum in self.COLUMNS.items()
}
        concated = DataFrame(columns=pandas_columns)
"""Unit tests for soundings.py."""
import copy
import unittest
import numpy
import pandas
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import nwp_model_utils
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import temperature_conversions
from gewittergefahr.gg_utils import moisture_conversions
TOLERANCE = 1e-6
TOLERANCE_FOR_CONVERTED_VALUES = 1e-3
# The following constants are used to test _get_nwp_fields_for_sounding.
MINIMUM_PRESSURE_MB = 950.
MODEL_NAME = nwp_model_utils.RAP_MODEL_NAME
SURFACE_HEIGHT_NAME, SURFACE_HEIGHT_NAME_GRIB1 = (
nwp_model_utils.get_lowest_height_name(MODEL_NAME)
)
SURFACE_TEMP_NAME, SURFACE_TEMP_NAME_GRIB1 = (
nwp_model_utils.get_lowest_temperature_name(MODEL_NAME)
)
SURFACE_HUMIDITY_NAME, SURFACE_HUMIDITY_NAME_GRIB1 = (
nwp_model_utils.get_lowest_humidity_name(MODEL_NAME)
)
SURFACE_U_WIND_NAME, SURFACE_U_WIND_NAME_GRIB1 = (
nwp_model_utils.get_lowest_u_wind_name(MODEL_NAME)
)
SURFACE_V_WIND_NAME, SURFACE_V_WIND_NAME_GRIB1 = (
nwp_model_utils.get_lowest_v_wind_name(MODEL_NAME)
)
SURFACE_PRESSURE_NAME, SURFACE_PRESSURE_NAME_GRIB1 = (
nwp_model_utils.get_lowest_pressure_name(MODEL_NAME)
)
FIELD_NAMES_WITH_SURFACE = [
'geopotential_height_metres_950mb', 'geopotential_height_metres_975mb',
'geopotential_height_metres_1000mb', SURFACE_HEIGHT_NAME,
'temperature_kelvins_950mb', 'temperature_kelvins_975mb',
'temperature_kelvins_1000mb', SURFACE_TEMP_NAME,
'relative_humidity_percent_950mb', 'relative_humidity_percent_975mb',
'relative_humidity_percent_1000mb', SURFACE_HUMIDITY_NAME,
'u_wind_m_s01_950mb', 'u_wind_m_s01_975mb', 'u_wind_m_s01_1000mb',
SURFACE_U_WIND_NAME,
'v_wind_m_s01_950mb', 'v_wind_m_s01_975mb', 'v_wind_m_s01_1000mb',
SURFACE_V_WIND_NAME,
SURFACE_PRESSURE_NAME
]
FIELD_NAMES_NO_SURFACE = [
'geopotential_height_metres_950mb', 'geopotential_height_metres_975mb',
'geopotential_height_metres_1000mb',
'temperature_kelvins_950mb', 'temperature_kelvins_975mb',
'temperature_kelvins_1000mb',
'relative_humidity_percent_950mb', 'relative_humidity_percent_975mb',
'relative_humidity_percent_1000mb',
'u_wind_m_s01_950mb', 'u_wind_m_s01_975mb', 'u_wind_m_s01_1000mb',
'v_wind_m_s01_950mb', 'v_wind_m_s01_975mb', 'v_wind_m_s01_1000mb'
]
FIELD_NAMES_WITH_SURFACE_GRIB1 = [
'HGT:950 mb', 'HGT:975 mb', 'HGT:1000 mb', SURFACE_HEIGHT_NAME_GRIB1,
'TMP:950 mb', 'TMP:975 mb', 'TMP:1000 mb', SURFACE_TEMP_NAME_GRIB1,
'RH:950 mb', 'RH:975 mb', 'RH:1000 mb', SURFACE_HUMIDITY_NAME_GRIB1,
'UGRD:950 mb', 'UGRD:975 mb', 'UGRD:1000 mb', SURFACE_U_WIND_NAME_GRIB1,
'VGRD:950 mb', 'VGRD:975 mb', 'VGRD:1000 mb', SURFACE_V_WIND_NAME_GRIB1,
SURFACE_PRESSURE_NAME_GRIB1
]
FIELD_NAMES_NO_SURFACE_GRIB1 = [
'HGT:950 mb', 'HGT:975 mb', 'HGT:1000 mb',
'TMP:950 mb', 'TMP:975 mb', 'TMP:1000 mb',
'RH:950 mb', 'RH:975 mb', 'RH:1000 mb',
'UGRD:950 mb', 'UGRD:975 mb', 'UGRD:1000 mb',
'VGRD:950 mb', 'VGRD:975 mb', 'VGRD:1000 mb'
]
HEIGHT_NAMES_NO_SURFACE = [
'geopotential_height_metres_950mb', 'geopotential_height_metres_975mb',
'geopotential_height_metres_1000mb'
]
TEMPERATURE_NAMES_NO_SURFACE = [
'temperature_kelvins_950mb', 'temperature_kelvins_975mb',
'temperature_kelvins_1000mb'
]
HUMIDITY_NAMES_NO_SURFACE = [
'relative_humidity_percent_950mb', 'relative_humidity_percent_975mb',
'relative_humidity_percent_1000mb'
]
U_WIND_NAMES_NO_SURFACE = [
'u_wind_m_s01_950mb', 'u_wind_m_s01_975mb', 'u_wind_m_s01_1000mb'
]
V_WIND_NAMES_NO_SURFACE = [
'v_wind_m_s01_950mb', 'v_wind_m_s01_975mb', 'v_wind_m_s01_1000mb'
]
PRESSURE_LEVELS_NO_SURFACE_MB = numpy.array([950, 975, 1000], dtype=float)
THIS_DICT = {
soundings.PRESSURE_LEVEL_KEY: numpy.concatenate((
PRESSURE_LEVELS_NO_SURFACE_MB, numpy.array([numpy.nan])
)),
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS:
HEIGHT_NAMES_NO_SURFACE + [SURFACE_HEIGHT_NAME],
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS:
TEMPERATURE_NAMES_NO_SURFACE + [SURFACE_TEMP_NAME],
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS:
HUMIDITY_NAMES_NO_SURFACE + [SURFACE_HUMIDITY_NAME],
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS:
U_WIND_NAMES_NO_SURFACE + [SURFACE_U_WIND_NAME],
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS:
V_WIND_NAMES_NO_SURFACE + [SURFACE_V_WIND_NAME]
}
FIELD_NAME_TABLE_WITH_SURFACE = pandas.DataFrame.from_dict(THIS_DICT)
THIS_DICT = {
soundings.PRESSURE_LEVEL_KEY: PRESSURE_LEVELS_NO_SURFACE_MB,
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS: HEIGHT_NAMES_NO_SURFACE,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS:
TEMPERATURE_NAMES_NO_SURFACE,
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS: HUMIDITY_NAMES_NO_SURFACE,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS: U_WIND_NAMES_NO_SURFACE,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS: V_WIND_NAMES_NO_SURFACE
}
FIELD_NAME_TABLE_NO_SURFACE = pandas.DataFrame.from_dict(THIS_DICT)
# The following constants are used to test _create_target_points_for_interp.
THESE_FULL_ID_STRINGS = ['A', 'B', 'C', 'A', 'B', 'C']
THESE_TIMES_UNIX_SEC = numpy.array([0, 0, 0, 1, 1, 1], dtype=int)
THESE_LATITUDES_DEG = numpy.array([50, 55, 60, 51, 56, 61], dtype=float)
THESE_LONGITUDES_DEG = numpy.array([250, 260, 270, 251, 261, 271], dtype=float)
THESE_EAST_VELOCITIES_M_S01 = numpy.full(6, 10000, dtype=float)
THESE_NORTH_VELOCITIES_M_S01 = numpy.full(6, 10000, dtype=float)
THIS_DICT = {
tracking_utils.FULL_ID_COLUMN: THESE_FULL_ID_STRINGS,
tracking_utils.VALID_TIME_COLUMN: THESE_TIMES_UNIX_SEC,
tracking_utils.CENTROID_LATITUDE_COLUMN: THESE_LATITUDES_DEG,
tracking_utils.CENTROID_LONGITUDE_COLUMN: THESE_LONGITUDES_DEG,
tracking_utils.EAST_VELOCITY_COLUMN: THESE_EAST_VELOCITIES_M_S01,
tracking_utils.NORTH_VELOCITY_COLUMN: THESE_NORTH_VELOCITIES_M_S01
}
DUMMY_STORM_OBJECT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
UNIQUE_LEAD_TIMES_SECONDS = numpy.array([0, 1], dtype=int)
THESE_FULL_ID_STRINGS = [
'A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C'
]
THESE_INIT_TIMES_UNIX_SEC = numpy.array(
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=int
)
THESE_LATITUDES_DEG = numpy.array([
50, 55, 60, 51, 56, 61, 50.08981978, 55.08972691, 60.08963404, 51.08980123,
56.08970834, 61.08961544
], dtype=float)
THESE_LONGITUDES_DEG = numpy.array([
250, 260, 270, 251, 261, 271, 250.13973873, 260.15661394, 270.17969769,
251.14273048, 261.16064721, 271.18533962
], dtype=float)
THESE_VALID_TIMES_UNIX_SEC = numpy.array(
[0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2], dtype=int
)
THESE_LEAD_TIMES_SECONDS = numpy.array(
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=int
)
THESE_EAST_VELOCITIES_M_S01 = numpy.full(12, 10000, dtype=float)
THESE_NORTH_VELOCITIES_M_S01 = numpy.full(12, 10000, dtype=float)
THIS_DICT = {
tracking_utils.FULL_ID_COLUMN: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIME_COLUMN: THESE_INIT_TIMES_UNIX_SEC,
tracking_utils.CENTROID_LATITUDE_COLUMN: THESE_LATITUDES_DEG,
tracking_utils.CENTROID_LONGITUDE_COLUMN: THESE_LONGITUDES_DEG,
soundings.FORECAST_TIME_COLUMN: THESE_VALID_TIMES_UNIX_SEC,
soundings.LEAD_TIME_KEY: THESE_LEAD_TIMES_SECONDS,
tracking_utils.EAST_VELOCITY_COLUMN: THESE_EAST_VELOCITIES_M_S01,
tracking_utils.NORTH_VELOCITY_COLUMN: THESE_NORTH_VELOCITIES_M_S01
}
DUMMY_TARGET_POINT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
# The following constants are used to test _convert_interp_table_to_soundings.
THIS_MATRIX = numpy.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
[2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
], dtype=float)
INTERP_TABLE_NO_SURFACE = pandas.DataFrame(THIS_MATRIX)
THESE_FULL_ID_STRINGS = ['a', 'b']
THESE_INIT_TIMES_UNIX_SEC = numpy.array([10, 10], dtype=int)
THESE_LEAD_TIMES_SECONDS = numpy.array([5, 5], dtype=int)
THIS_DICT = {
tracking_utils.FULL_ID_COLUMN: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIME_COLUMN: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIME_KEY: THESE_LEAD_TIMES_SECONDS
}
TARGET_POINT_TABLE = pandas.DataFrame.from_dict(THIS_DICT)
for k in range(len(FIELD_NAMES_NO_SURFACE)):
INTERP_TABLE_NO_SURFACE.rename(
columns={k: FIELD_NAMES_NO_SURFACE[k]}, inplace=True
)
THIS_FIRST_MATRIX = numpy.array([
[0, 6, 3, 9, 12],
[1, 7, 4, 10, 13],
[2, 8, 5, 11, 14]
], dtype=int)
THIS_SECOND_MATRIX = numpy.array([
[2, 14, 8, 20, 26],
[4, 16, 10, 22, 28],
[6, 18, 12, 24, 30]
], dtype=int)
THIS_SOUNDING_MATRIX = numpy.stack(
(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX), axis=0
)
THESE_PRESSURE_LEVELS_MB = numpy.array([950, 975, 1000])
THESE_FIELD_NAMES = [
nwp_model_utils.HEIGHT_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.RH_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.TEMPERATURE_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.U_WIND_COLUMN_FOR_SOUNDINGS,
nwp_model_utils.V_WIND_COLUMN_FOR_SOUNDINGS
]
SOUNDING_DICT_P_COORDS_NO_SURFACE = {
soundings.FULL_IDS_KEY: THESE_FULL_ID_STRINGS,
soundings.INITIAL_TIMES_KEY: THESE_INIT_TIMES_UNIX_SEC,
soundings.LEAD_TIMES_KEY: THESE_LEAD_TIMES_SECONDS,
soundings.SOUNDING_MATRIX_KEY: THIS_SOUNDING_MATRIX,
soundings.SURFACE_PRESSURES_KEY: None,
soundings.PRESSURE_LEVELS_WITH_SFC_KEY: THESE_PRESSURE_LEVELS_MB,
soundings.FIELD_NAMES_KEY: THESE_FIELD_NAMES
}
THIS_MATRIX = numpy.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
2000],
[2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40,
4200]
], dtype=float)
INTERP_TABLE_WITH_SURFACE = pandas.DataFrame(THIS_MATRIX)
import argparse
import logging
import os
import json
import boto3
import subprocess
import sys
from urllib.parse import urlparse
os.system('pip install autogluon')
from autogluon import TabularPrediction as task
import pandas as pd # this should come after the pip install.
logging.basicConfig(level=logging.DEBUG)
logging.info(subprocess.call('ls -lR /opt/ml/input'.split()))
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def train(args):
# SageMaker passes num_cpus, num_gpus and other args we can use to tailor training to
# the current container environment, but here we just use simple cpu context.
num_gpus = int(os.environ['SM_NUM_GPUS'])
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
target = args.target
# load training and validation data
training_dir = args.train
filename = args.filename
logging.info(training_dir)
train_data = task.Dataset(file_path=training_dir + '/' + filename)
predictor = task.fit(train_data = train_data, label=target, output_directory=model_dir)
return predictor
# ------------------------------------------------------------ #
# Hosting methods #
# ------------------------------------------------------------ #
def model_fn(model_dir):
"""
Load the gluon model. Called once when hosting service starts.
:param: model_dir The directory where model files are stored.
:return: a model (in this case an AutoGluon network)
"""
net = task.load(model_dir)
return net
def transform_fn(net, data, input_content_type, output_content_type):
"""
Transform a request using the Gluon model. Called once per request.
:param net: The AutoGluon model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
data = json.loads(data)
# the input request payload has to be deserialized twice since it has a discrete header
data = json.loads(data)
df_parsed = pd.DataFrame(data)
prediction = net.predict(df_parsed)
response_body = json.dumps(prediction.tolist())
return response_body, output_content_type
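# Client-side sketch (added for illustration, not part of the original script):
# the request body has to be JSON-encoded twice to match the two json.loads()
# calls in transform_fn above.
def _example_invoke_payload(df):
    inner = df.to_json(orient="records")  # first encoding: the feature records
    return json.dumps(inner)              # second encoding: the wrapper the handler strips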
# ------------------------------------------------------------ #
# Training execution #
# ------------------------------------------------------------ #
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
parser.add_argument('--filename', type=str, default='train.csv')
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--target', type=str, default='target')
parser.add_argument('--s3-output', type=str, default='s3://autogluon-test/results')
parser.add_argument('--training-job-name', type=str, default=json.loads(os.environ['SM_TRAINING_ENV'])['job_name'])
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
predictor = train(args)
training_dir = args.train
train_file = args.filename
test_file = train_file.replace('train', 'test', 1)
dataset_name = train_file.split('_')[0]
print(dataset_name)
test_data = task.Dataset(file_path=os.path.join(training_dir, test_file))
u = urlparse(args.s3_output, allow_fragments=False)
bucket = u.netloc
print(bucket)
prefix = u.path.strip('/')
print(prefix)
s3 = boto3.client('s3')
try:
y_test = test_data[args.target] # values to predict
test_data_nolab = test_data.drop(labels=[args.target], axis=1) # delete label column to prove we're not cheating
y_pred = predictor.predict(test_data_nolab)
y_pred_df = pd.DataFrame.from_dict({'True': y_test, 'Predicted': y_pred})
pred_file = f'{dataset_name}_test_predictions.csv'
y_pred_df.to_csv(pred_file, index=False, header=True)
leaderboard = predictor.leaderboard()
lead_file = f'{dataset_name}_leaderboard.csv'
leaderboard.to_csv(lead_file)
perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred, auxiliary_metrics=True)
perf_file = f'{dataset_name}_model_performance.txt'
with open(perf_file, 'w') as f:
print(json.dumps(perf, indent=4), file=f)
summary = predictor.fit_summary()
summ_file = f'{dataset_name}_fit_summary.txt'
with open(summ_file, 'w') as f:
print(summary, file=f)
files_to_upload = [pred_file, lead_file, perf_file, summ_file]
except:
y_pred = predictor.predict(test_data)
        y_pred_df = pd.DataFrame.from_dict({'Predicted': y_pred})
#!/usr/bin/env python3
# coding=utf-8
import datetime as dt
import logging
# The arrow library is used to handle datetimes
import arrow
import pandas as pd
from parsers import occtonet
from parsers.lib.config import refetch_frequency
# Abbreviations
# JP-HKD : Hokkaido
# JP-TH : Tohoku
# JP-TK : Tokyo area
# JP-CB : Chubu
# JP-HR : Hokuriku
# JP-KN : Kansai
# JP-SK : Shikoku
# JP-KY : Kyushu
# JP-ON : Okinawa
# JP-CG : Chūgoku
sources = {
"JP-HKD": "denkiyoho.hepco.co.jp",
"JP-TH": "setsuden.nw.tohoku-epco.co.jp",
"JP-TK": "www.tepco.co.jp",
"JP-CB": "denki-yoho.chuden.jp",
"JP-HR": "www.rikuden.co.jp/denki-yoho",
"JP-KN": "www.kepco.co.jp",
"JP-SK": "www.yonden.co.jp",
"JP-CG": "www.energia.co.jp",
"JP-KY": "www.kyuden.co.jp/power_usages/pc.html",
"JP-ON": "www.okiden.co.jp/denki/",
}
ZONES_ONLY_LIVE = ["JP-TK", "JP-CB", "JP-SK"]
@refetch_frequency(dt.timedelta(days=1))
def fetch_production(
zone_key="JP-TK",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
) -> list:
"""
Calculates production from consumption and imports for a given area
All production is mapped to unknown
"""
df = fetch_production_df(zone_key, session, target_datetime)
# add a row to production for each entry in the dictionary:
datalist = []
for i in df.index:
data = {
"zoneKey": zone_key,
"datetime": df.loc[i, "datetime"].to_pydatetime(),
"production": {
"biomass": None,
"coal": None,
"gas": None,
"hydro": None,
"nuclear": None,
"oil": None,
"solar": df.loc[i, "solar"] if "solar" in df.columns else None,
"wind": None,
"geothermal": None,
"unknown": df.loc[i, "unknown"],
},
"source": "occtonet.or.jp, {}".format(sources[zone_key]),
}
datalist.append(data)
return datalist
def fetch_production_df(
zone_key="JP-TK",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
):
"""
Calculates production from consumption and imports for a given area.
All production is mapped to unknown.
"""
exch_map = {
"JP-HKD": ["JP-TH"],
"JP-TH": ["JP-TK", "JP-HKD"],
"JP-TK": ["JP-TH", "JP-CB"],
"JP-CB": ["JP-TK", "JP-HR", "JP-KN"],
"JP-HR": ["JP-CB", "JP-KN"],
"JP-KN": ["JP-CB", "JP-HR", "JP-SK", "JP-CG"],
"JP-SK": ["JP-KN", "JP-CG"],
"JP-CG": ["JP-KN", "JP-SK", "JP-KY"],
"JP-ON": [],
"JP-KY": ["JP-CG"],
}
df = fetch_consumption_df(zone_key, target_datetime)
df["imports"] = 0
for zone in exch_map[zone_key]:
df2 = occtonet.fetch_exchange(
zone_key1=zone_key,
zone_key2=zone,
session=session,
target_datetime=target_datetime,
)
df2 = pd.DataFrame(df2)
exchname = df2.loc[0, "sortedZoneKeys"]
df2 = df2[["datetime", "netFlow"]]
df2.columns = ["datetime", exchname]
        df = pd.merge(df, df2, how="inner", on="datetime")
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype,
)
from pandas.core.index import _ensure_index
from pandas.core.base import DataError
from modin.error_message import ErrorMessage
from modin.engines.base.block_partitions import BaseBlockPartitions
class PandasQueryCompiler(object):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self,
block_partitions_object: BaseBlockPartitions,
index: pandas.Index,
columns: pandas.Index,
dtypes=None,
):
assert isinstance(block_partitions_object, BaseBlockPartitions)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
"""By default, constructor method will invoke an init"""
return type(self)(block_paritions_object, index, columns, dtypes)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
map_func = self._prepare_method(lambda df: df.dtypes)
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
self._dtype_cache = self.data.full_reduce(map_func, dtype_builder, 0)
self._dtype_cache.index = self.columns
elif not self._dtype_cache.index.equals(self.columns):
self._dtype_cache.index = self.columns
return self._dtype_cache
def _set_dtype(self, dtypes):
self._dtype_cache = dtypes
dtypes = property(_get_dtype, _set_dtype)
# These objects are currently not distributed.
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _validate_set_axis(self, new_labels, old_labels):
new_labels = _ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = _ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = _ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
# END Index and columns objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix):
new_column_names = self.columns.map(lambda x: str(prefix) + str(x))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
def add_suffix(self, suffix):
new_column_names = self.columns.map(lambda x: str(x) + str(suffix))
new_dtype_cache = self._dtype_cache.copy()
if new_dtype_cache is not None:
new_dtype_cache.index = new_column_names
return self.__constructor__(
self.data, self.index, new_column_names, new_dtype_cache
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(), self.index.copy(), self.columns.copy(), self._dtype_cache
)
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
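    # Rough usage sketch (hypothetical compilers qc1/qc2 built from pandas
    # DataFrames with overlapping columns):
    #   qc1.concat(0, [qc2], join="outer", ignore_index=False)
    # Both compilers are first reindexed to the joined column labels, then their
    # underlying BaseBlockPartitions objects are stitched together along axis 0.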
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if isinstance(other, list):
return self._join_list_of_managers(other, **kwargs)
else:
return self._join_query_compiler(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
# Concatenating two managers requires aligning their indices. After the
# indices are aligned, it should just be a simple concatenation of the
# `BaseBlockPartitions` objects. This should not require remote compute.
joined_axis = self._join_index_objects(
axis,
[other.columns if axis == 0 else other.index for other in others],
join,
sort=sort,
)
# Since we are concatenating a list of managers, we will align all of
# the indices based on the `joined_axis` computed above.
to_append = [other.reindex(axis ^ 1, joined_axis).data for other in others]
new_self = self.reindex(axis ^ 1, joined_axis).data
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_query_compiler(self, other, **kwargs):
assert isinstance(
other, type(self)
), "This method is for data manager objects only"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(1, other.index, how, sort=sort)
to_join = other.reindex(0, joined_index).data
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# We are using proxy DataFrame objects to build the columns based on
# the `lsuffix` and `rsuffix`.
self_proxy = pandas.DataFrame(columns=self.columns)
other_proxy = pandas.DataFrame(columns=other.columns)
new_columns = self_proxy.join(
other_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of DataManager objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
joined_index = self._join_index_objects(
1, [other.index for other in others], how, sort=sort
)
to_join = [other.reindex(0, joined_index).data for other in others]
new_self = self.reindex(0, joined_index).data
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
self_proxy = pandas.DataFrame(columns=self.columns)
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
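    # Sketch of the expected behavior (hypothetical example): if qc1 has columns
    # ["a", "b"] and qc2 has ["b", "c"], then qc1.add(qc2) outer-joins both axes,
    # so "a" and "c" come back as NaN, mirroring pandas.DataFrame.add alignment.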
def inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to join to make (e.g. right, outer).
Returns:
New DataManager with new data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
joined_index = self._join_index_objects(1, other.index, how_to_join, sort=False)
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
reindexed_other = other.reindex(0, joined_index).data
reindexed_self = self.reindex(0, joined_index).data
        # There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, self_cols, other_cols, func):
left.columns = self_cols
right.columns = other_cols
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1,
lambda l, r: inter_data_op_builder(l, r, self_cols, other_cols, func),
reindexed_other,
)
return self.__constructor__(new_data, joined_index, new_columns)
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New DataManager with new data and index.
"""
axis = pandas.DataFrame()._get_axis_number(kwargs.get("axis", 0))
if isinstance(other, type(self)):
return self.inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self.scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
def add(self, other, **kwargs):
"""Adds this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with added data and new index.
"""
func = pandas.DataFrame.add
return self._inter_df_op_handler(func, other, **kwargs)
def div(self, other, **kwargs):
"""Divides this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.div
return self._inter_df_op_handler(func, other, **kwargs)
def eq(self, other, **kwargs):
"""Compares equality (==) with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.eq
return self._inter_df_op_handler(func, other, **kwargs)
def floordiv(self, other, **kwargs):
"""Floordivs this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with floordiv-ed data and index.
"""
func = pandas.DataFrame.floordiv
return self._inter_df_op_handler(func, other, **kwargs)
def ge(self, other, **kwargs):
"""Compares this manager >= than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.ge
return self._inter_df_op_handler(func, other, **kwargs)
def gt(self, other, **kwargs):
"""Compares this manager > than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.gt
return self._inter_df_op_handler(func, other, **kwargs)
def le(self, other, **kwargs):
"""Compares this manager < than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.le
return self._inter_df_op_handler(func, other, **kwargs)
def lt(self, other, **kwargs):
"""Compares this manager <= than other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.lt
return self._inter_df_op_handler(func, other, **kwargs)
def mod(self, other, **kwargs):
"""Mods this manager against other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with mod-ed data and index.
"""
func = pandas.DataFrame.mod
return self._inter_df_op_handler(func, other, **kwargs)
def mul(self, other, **kwargs):
"""Multiplies this manager against other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with multiplied data and index.
"""
func = pandas.DataFrame.mul
return self._inter_df_op_handler(func, other, **kwargs)
def ne(self, other, **kwargs):
"""Compares this manager != to other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with compared data and index.
"""
func = pandas.DataFrame.ne
return self._inter_df_op_handler(func, other, **kwargs)
def pow(self, other, **kwargs):
"""Exponential power of this manager to other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with pow-ed data and index.
"""
func = pandas.DataFrame.pow
return self._inter_df_op_handler(func, other, **kwargs)
def rdiv(self, other, **kwargs):
"""Divides other object (manager or scalar) with this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.rdiv
return self._inter_df_op_handler(func, other, **kwargs)
def rfloordiv(self, other, **kwargs):
"""Floordivs this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with floordiv-ed data and index.
"""
func = pandas.DataFrame.rfloordiv
return self._inter_df_op_handler(func, other, **kwargs)
def rmod(self, other, **kwargs):
"""Mods this manager with other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with mod data and index.
"""
func = pandas.DataFrame.rmod
return self._inter_df_op_handler(func, other, **kwargs)
def rpow(self, other, **kwargs):
"""Exponential power of other object (manager or scalar) to this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with pow-ed data and new index.
"""
func = pandas.DataFrame.rpow
return self._inter_df_op_handler(func, other, **kwargs)
def rsub(self, other, **kwargs):
"""Subtracts other object (manager or scalar) from this manager.
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with subtracted data and new index.
"""
func = pandas.DataFrame.rsub
return self._inter_df_op_handler(func, other, **kwargs)
def sub(self, other, **kwargs):
"""Subtracts this manager from other object (manager or scalar).
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with subtracted data and new index.
"""
func = pandas.DataFrame.sub
return self._inter_df_op_handler(func, other, **kwargs)
def truediv(self, other, **kwargs):
"""Divides this manager with other object (manager or scalar).
Functionally same as div
Args:
other: The other object (manager or scalar).
Returns:
New DataManager with divided data and new index.
"""
func = pandas.DataFrame.truediv
return self._inter_df_op_handler(func, other, **kwargs)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.clip, **kwargs)
if is_list_like(lower) or is_list_like(upper):
df = self.map_across_full_axis(axis, func)
return self.__constructor__(df, self.index, self.columns)
return self.scalar_operations(axis, lower or upper, func)
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same DataManager subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New DataManager with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same DataManager subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
# We are required to perform this reindexing on everything to
# shuffle the data together
reindexed_cond = cond.reindex(0, self.index).data
reindexed_other = other.reindex(0, self.index).data
reindexed_self = self.reindex(0, self.index).data
first_pass = reindexed_cond.inter_data_operation(
1,
lambda l, r: where_builder_first_pass(l, r, **kwargs),
reindexed_other,
)
final_pass = reindexed_self.inter_data_operation(
1, lambda l, r: where_builder_second_pass(l, r, **kwargs), first_pass
)
return self.__constructor__(final_pass, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = [i for i in range(len(other))]
def where_builder_series(df, cond, other, **kwargs):
return df.where(cond, other, **kwargs)
reindexed_self = self.reindex(
axis, self.index if not axis else self.columns
).data
reindexed_cond = cond.reindex(
axis, self.index if not axis else self.columns
).data
new_data = reindexed_self.inter_data_operation(
axis,
lambda l, r: where_builder_series(l, r, other, **kwargs),
reindexed_cond,
)
return self.__constructor__(new_data, self.index, self.columns)
# END Inter-Data operations
# Single Manager scalar operations (e.g. add to scalar, list of scalars)
def scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
New DataManager with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self.map_partitions(func)
# END Single Manager scalar operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
New DataManager with updated data and new index.
"""
# To reindex, we need a function that will be shipped to each of the
# partitions.
        def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
            lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
New DataManager with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = "index" if "index" not in self.columns else "level_0"
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
#
# _is_transposed, 0 for False or non-transposed, 1 for True or transposed.
_is_transposed = 0
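    # Lazy-transpose sketch (hypothetical usage): qc.transpose() only flips the
    # block layout and swaps the index/columns metadata; the per-block pandas
    # frames are transposed later, inside _prepare_method, when a function runs.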
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the
new_manager = self.__constructor__(new_data, self.columns, self.index)
# It is possible that this is already transposed
new_manager._is_transposed = self._is_transposed ^ 1
return new_manager
# END Transpose
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
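    # Typical composition (sketch): count() maps pandas.DataFrame.count over each
    # block and then reduces the per-block counts with pandas.DataFrame.sum, so
    # qc.count(axis=0) returns the same Series a single pandas DataFrame would.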
def full_reduce(self, axis, map_func, reduce_func=None, numeric_only=False):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
numeric_only: Apply only over the numeric rows.
Return:
Returns Pandas Series containing the results from map_func and reduce_func.
"""
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
else:
query_compiler = self
if reduce_func is None:
reduce_func = map_func
# The XOR here will ensure that we reduce over the correct axis that
# exists on the internal partitions. We flip the axis
result = query_compiler.data.full_reduce(
map_func, reduce_func, axis ^ self._is_transposed
)
if result.shape == (0,):
return result
elif not axis:
result.index = query_compiler.columns
else:
result.index = query_compiler.index
return result
def _process_min_max(self, func, **kwargs):
"""Calculates the min or max of the DataFrame.
Return:
Pandas series containing the min or max values from each column or
row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
def min_max_builder(df, **kwargs):
if not df.empty:
return func(df, **kwargs)
map_func = self._prepare_method(min_max_builder, **kwargs)
return self.full_reduce(axis, map_func, numeric_only=numeric_only)
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
Pandas series containing counts of non-NaN objects from each column or row.
"""
axis = kwargs.get("axis", 0)
numeric_only = kwargs.get("numeric_only", False)
map_func = self._prepare_method(pandas.DataFrame.count, **kwargs)
reduce_func = self._prepare_method(pandas.DataFrame.sum, **kwargs)
return self.full_reduce(axis, map_func, reduce_func, numeric_only)
def max(self, **kwargs):
"""Returns the maximum value for each column or row.
Return:
Pandas series with the maximum values from each column or row.
"""
return self._process_min_max(pandas.DataFrame.max, **kwargs)
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
Pandas series containing the mean from each numerical column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
sums = self.sum(**kwargs)
counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
try:
# If we need to drop any columns, it will throw a TypeError
return sums.divide(counts)
# In the case that a TypeError is thrown, we need to iterate through, similar to
# how pandas does and do the division only on things that can be divided.
# NOTE: We will only hit this condition if numeric_only is not True.
except TypeError:
def can_divide(l, r):
try:
pandas.Series([l]).divide(r)
except TypeError:
return False
return True
# Iterate through the sums to check that we can divide them. If not, then
# drop the record. This matches pandas behavior.
return pandas.Series(
{
idx: sums[idx] / counts[idx]
for idx in sums.index
if can_divide(sums[idx], counts[idx])
}
)
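    # e.g. (sketch) for a numeric column [1, 2, NaN]: sum() gives 3.0 and count()
    # gives 2, so mean() returns 1.5; columns whose sum cannot be divided (such as
    # concatenated strings) are dropped by the TypeError fallback above.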
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
Pandas series with the minimum value from each column or row.
"""
return self._process_min_max(pandas.DataFrame.min, **kwargs)
def _process_sum_prod(self, func, **kwargs):
"""Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
Return:
Pandas Series with sum or prod of DataFrame.
"""
axis = kwargs.get("axis", 0)
numeric_only = kwargs.get("numeric_only", None) if not axis else True
min_count = kwargs.get("min_count", 0)
reduce_index = self.columns if axis else self.index
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
else:
query_compiler = self
new_index = query_compiler.index if axis else query_compiler.columns
def sum_prod_builder(df, **kwargs):
if not df.empty:
return func(df, **kwargs)
else:
return pandas.DataFrame([])
map_func = self._prepare_method(sum_prod_builder, **kwargs)
if min_count <= 1:
return self.full_reduce(axis, map_func, numeric_only=numeric_only)
elif min_count > len(reduce_index):
return pandas.Series(
[np.nan] * len(new_index), index=new_index, dtype=np.dtype("object")
)
else:
return self.full_axis_reduce(map_func, axis)
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
Pandas series with the product of each numerical column or row.
"""
return self._process_sum_prod(pandas.DataFrame.prod, **kwargs)
def sum(self, **kwargs):
"""Returns the sum of each numerical column or row.
Return:
Pandas series with the sum of each numerical column or row.
"""
return self._process_sum_prod(pandas.DataFrame.sum, **kwargs)
# END Full Reduce operations
# Map partitions operations
# These operations are operations that apply a function to every partition.
def map_partitions(self, func, new_dtypes=None):
return self.__constructor__(
self.data.map_across_blocks(func), self.index, self.columns, new_dtypes
)
def abs(self):
func = self._prepare_method(pandas.DataFrame.abs)
return self.map_partitions(func, new_dtypes=self.dtypes.copy())
def applymap(self, func):
remote_func = self._prepare_method(pandas.DataFrame.applymap, func=func)
return self.map_partitions(remote_func)
def isin(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.isin, **kwargs)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def isna(self):
func = self._prepare_method(pandas.DataFrame.isna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def isnull(self):
func = self._prepare_method(pandas.DataFrame.isnull)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def negative(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.__neg__, **kwargs)
return self.map_partitions(func)
def notna(self):
func = self._prepare_method(pandas.DataFrame.notna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def notnull(self):
func = self._prepare_method(pandas.DataFrame.notnull)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self.map_partitions(func, new_dtypes=new_dtypes)
def round(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.round, **kwargs)
return self.map_partitions(func, new_dtypes=self._dtype_cache)
# END Map partitions operations
# Map partitions across select indices
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
# Group indices to update by dtype for less map operations
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if dtype != self.dtypes[column]:
# Only add dtype only if different
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
# Update the new dtype series to the proper pandas dtype
new_dtype = np.dtype(dtype)
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
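    # Sketch: qc.astype({"a": "float64", "b": "float64", "c": "int32"}) issues one
    # partition-level astype call for the float64 group {a, b} and another for
    # {c}, instead of one remote call per column.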
# END Map partitions across select indices
# Column/Row partitions reduce operations
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
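    # e.g. (sketch) median() cannot be computed block-by-block: every value of a
    # column is needed at once, so the blocks are coalesced along the axis and
    # the reducing function runs on each full column.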
def full_axis_reduce(self, func, axis, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
func: Function to reduce the Manager by. This function takes in a Manager.
axis: axis to apply the function to.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
"""
        # We XOR with axis because if we are doing an operation over the columns
        # (i.e. along the rows), we want to take the transpose so that the
        # results from the same partition will be concatenated together first.
        # We need this here because if the operation is over the columns,
        # map_across_full_axis does not transpose the result before returning.
result = self.data.map_across_full_axis(axis, func).to_pandas(
self._is_transposed ^ axis
)
if result.empty:
return result
if not axis:
result.index = (
alternate_index if alternate_index is not None else self.columns
)
else:
result.index = (
alternate_index if alternate_index is not None else self.index
)
return result
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
def any(self, **kwargs):
"""Returns whether any the elements are true, potentially over an axis.
Return:
Pandas Series containing boolean values or boolean.
"""
return self._process_all_any(lambda df, **kwargs: df.any(**kwargs), **kwargs)
def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
Pandas Series containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis_none = True if axis is None else False
axis = 0 if axis is None else axis
kwargs["axis"] = axis
bool_only = kwargs.get("bool_only", None)
kwargs["bool_only"] = False if bool_only is None else bool_only
not_bool_col = []
numeric_col_count = 0
for col, dtype in zip(self.columns, self.dtypes):
if not is_bool_dtype(dtype):
not_bool_col.append(col)
numeric_col_count += 1 if is_numeric_dtype(dtype) else 0
if bool_only:
if axis == 0 and not axis_none and len(not_bool_col) == len(self.columns):
return pandas.Series(dtype=bool)
if len(not_bool_col) == len(self.columns):
query_compiler = self
else:
query_compiler = self.drop(columns=not_bool_col)
else:
if (
bool_only is False
and axis_none
and len(not_bool_col) == len(self.columns)
and numeric_col_count != len(self.columns)
):
if func == pandas.DataFrame.all:
return self.getitem_single_key(self.columns[-1])[self.index[-1]]
elif func == pandas.DataFrame.any:
return self.getitem_single_key(self.columns[0])[self.index[0]]
query_compiler = self
builder_func = query_compiler._prepare_method(func, **kwargs)
result = query_compiler.full_axis_reduce(builder_func, axis)
if axis_none:
return func(result)
else:
return result
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._prepare_method(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index.
first_result = self.full_axis_reduce(func, 0)
return self.index[first_result.min()]
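    # Sketch: each column reports its first valid positional index; the minimum of
    # those positions is then translated back through self.index to the original
    # label.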
def _post_process_idx_ops(self, axis, intermediate_result):
"""Converts internal index to external index.
Args:
axis: 0 for columns and 1 for rows. Defaults to 0.
intermediate_result: Internal index of self.data.
Returns:
External index of the intermediate_result.
"""
index = self.index if not axis else self.columns
result = intermediate_result.apply(lambda x: index[x])
return result
def idxmax(self, **kwargs):
"""Returns the first occurance of the maximum over requested axis.
Returns:
Series containing the maximum of each column or axis.
"""
# The reason for the special treatment with idxmax/min is because we
# need to communicate the row number back here.
def idxmax_builder(df, **kwargs):
df.index = pandas.RangeIndex(len(df.index))
return df.idxmax(**kwargs)
axis = kwargs.get("axis", 0)
func = self._prepare_method(idxmax_builder, **kwargs)
max_result = self.full_axis_reduce(func, axis)
# Because our internal partitions don't track the external index, we
# have to do a conversion.
return self._post_process_idx_ops(axis, max_result)
def idxmin(self, **kwargs):
"""Returns the first occurance of the minimum over requested axis.
Returns:
Series containing the minimum of each column or axis.
"""
# The reason for the special treatment with idxmax/min is because we
# need to communicate the row number back here.
def idxmin_builder(df, **kwargs):
df.index = pandas.RangeIndex(len(df.index))
return df.idxmin(**kwargs)
axis = kwargs.get("axis", 0)
func = self._prepare_method(idxmin_builder, **kwargs)
min_result = self.full_axis_reduce(func, axis)
# Because our internal partitions don't track the external index, we
# have to do a conversion.
return self._post_process_idx_ops(axis, min_result)
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._prepare_method(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index.
first_result = self.full_axis_reduce(func, 0)
return self.index[first_result.max()]
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
Series containing the median of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.median, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
Series containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(index=False, deep=deep)
deep = kwargs.get("deep", False)
func = self._prepare_method(memory_usage_builder, **kwargs)
return self.full_axis_reduce(func, 0)
def nunique(self, **kwargs):
"""Returns the number of unique items over each column or row.
Returns:
Series of ints indexed by column or index names.
"""
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.nunique, **kwargs)
return self.full_axis_reduce(func, axis)
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
Series containing the quantile of each column or row.
"""
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
numeric_only = kwargs.get("numeric_only", True)
assert type(q) is float
if numeric_only:
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
else:
query_compiler = self
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._prepare_method(quantile_builder, **kwargs)
result = query_compiler.full_axis_reduce(func, axis)
result.name = q
return result
def skew(self, **kwargs):
"""Returns skew of each column or row.
Returns:
Series containing the skew of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.skew, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def std(self, **kwargs):
"""Returns standard deviation of each column or row.
Returns:
Series containing the standard deviation of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = self._prepare_method(pandas.DataFrame.std, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
def to_datetime(self, **kwargs):
"""Converts the Manager to a Series of DateTime objects.
Returns:
Series of DateTime objects.
"""
columns = self.columns
def to_datetime_builder(df, **kwargs):
df.columns = columns
return pandas.to_datetime(df, **kwargs)
func = self._prepare_method(to_datetime_builder, **kwargs)
return self.full_axis_reduce(func, 1)
def var(self, **kwargs):
"""Returns variance of each column or row.
Returns:
Series containing the variance of each column or row.
"""
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
result, query_compiler = self.numeric_function_clean_dataframe(axis)
if result is not None:
return result
func = query_compiler._prepare_method(pandas.DataFrame.var, **kwargs)
return query_compiler.full_axis_reduce(func, axis)
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# Currently, this means a Pandas Series will be returned, but in the future
# we will implement a Distributed Series, and this will be returned
# instead.
def full_axis_reduce_along_select_indices(
self, func, axis, index, pandas_result=True
):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces Manager to Series using full knowledge of an
axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting series.
pandas_result: Return the result as a Pandas Series instead of raw data.
Returns:
Either a Pandas Series with index or BaseBlockPartitions object.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
if pandas_result:
result = result.to_pandas(self._is_transposed)
result.index = index
return result
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
include = kwargs.get("include", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
if include is None:
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if is_list_like(exclude):
exclude.append(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(
func, 0, new_columns, False
)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series(
[np.float64 for _ in new_columns], index=new_columns
)
else:
new_dtypes = pandas.Series(
[np.object for _ in new_columns], index=new_columns
)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
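    # e.g. (sketch) qc.cumsum(axis=0) has to see every row of a column before it
    # can emit the final partial sums, so the blocks are presumably regrouped so
    # that complete columns are together before pandas.DataFrame.cumsum is applied.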
def map_across_full_axis(self, axis, func):
return self.data.map_across_full_axis(axis, func)
def _cumulative_builder(self, func, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(func, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(
new_data, self.index, self.columns, self._dtype_cache
)
def cumsum(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cumsum, **kwargs)
def cummax(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cummax, **kwargs)
def cummin(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cummin, **kwargs)
def cumprod(self, **kwargs):
return self._cumulative_builder(pandas.DataFrame.cumprod, **kwargs)
def diff(self, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.diff, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
def dropna(self, **kwargs):
"""Returns a new DataManager with null values dropped along given axis.
Return:
a new DataManager
"""
axis = kwargs.get("axis", 0)
subset = kwargs.get("subset")
thresh = kwargs.get("thresh")
how = kwargs.get("how", "any")
# We need to subset the axis that we care about with `subset`. This
# will be used to determine the number of values that are NA.
if subset is not None:
if not axis:
compute_na = self.getitem_column_array(subset)
else:
compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
else:
compute_na = self
if not isinstance(axis, list):
axis = [axis]
# We are building this dictionary first to determine which columns
# and rows to drop. This way we do not drop some columns before we
# know which rows need to be dropped.
if thresh is not None:
# Count the number of NA values and specify which are higher than
# thresh.
drop_values = {
ax ^ 1: compute_na.isna().sum(axis=ax ^ 1) > thresh for ax in axis
}
else:
drop_values = {
ax ^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1) for ax in axis
}
if 0 not in drop_values:
drop_values[0] = None
if 1 not in drop_values:
drop_values[1] = None
rm_from_index = (
[obj for obj in compute_na.index[drop_values[1]]]
if drop_values[1] is not None
else None
)
rm_from_columns = (
[obj for obj in compute_na.columns[drop_values[0]]]
if drop_values[0] is not None
else None
)
else:
rm_from_index = (
compute_na.index[drop_values[1]] if drop_values[1] is not None else None
)
rm_from_columns = (
compute_na.columns[drop_values[0]]
if drop_values[0] is not None
else None
)
return self.drop(index=rm_from_index, columns=rm_from_columns)
def eval(self, expr, **kwargs):
"""Returns a new DataManager with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new PandasDataManager with new columns after applying expr.
"""
inplace = kwargs.get("inplace", False)
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
# if there is no assignment, then we simply save the results
# in the first column
if expect_series:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
else:
expr = "{0} = {1}".format(columns[0], expr)
def eval_builder(df, **kwargs):
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
result.columns = pandas.RangeIndex(0, len(result.columns))
return result
func = self._prepare_method(eval_builder, **kwargs)
new_data = self.map_across_full_axis(1, func)
if expect_series:
result = new_data.to_pandas()[0]
result.name = columns_copy.name
result.index = index
return result
else:
columns = columns_copy.columns
return self.__constructor__(new_data, self.index, columns)
def mode(self, **kwargs):
"""Returns a new DataManager with modes calculated for each label along given axis.
Returns:
A new PandasDataManager with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return result
func = self._prepare_method(mode_builder, **kwargs)
new_data = self.map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
return self.__constructor__(
new_data, new_index, new_columns, self._dtype_cache
).dropna(axis=axis, how="all")
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new PandasDataManager with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
if isinstance(value, dict):
value = kwargs.pop("value")
if axis == 0:
index = self.columns
else:
index = self.index
value = {
idx: value[key] for key in value for idx in index.get_indexer_for([key])
}
def fillna_dict_builder(df, func_dict={}):
# We do this to ensure that no matter the state of the columns we get
# the correct ones.
func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
return df.fillna(value=func_dict, **kwargs)
new_data = self.data.apply_func_to_select_indices(
axis, fillna_dict_builder, value, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
else:
func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
new_data = self.map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
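    # Sketch: qc.fillna(value={"a": 0}) is routed to only the partitions that hold
    # column "a" via apply_func_to_select_indices, while qc.fillna(method="ffill")
    # falls through to the full-axis map so fills can cross partition boundaries.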
def query(self, expr, **kwargs):
"""Query columns of the DataManager with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
DataManager containing the rows where the boolean expression is satisfied.
"""
columns = self.columns
def query_builder(df, **kwargs):
# This is required because of an Arrow limitation
# TODO revisit for Arrow error
df = df.copy()
df.index = pandas.RangeIndex(len(df))
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pandas.RangeIndex(len(df.columns))
return df
func = self._prepare_method(query_builder, **kwargs)
new_data = self.map_across_full_axis(1, func)
# Query removes rows, so we need to update the index
new_index = self.compute_index(0, new_data, True)
return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
DataManager containing the ranks of the values along an axis.
"""
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
func = self._prepare_method(pandas.DataFrame.rank, **kwargs)
new_data = self.map_across_full_axis(axis, func)
# Since we assume no knowledge of internal state, we get the columns
# from the internal partitions.
if numeric_only:
new_columns = self.compute_index(1, new_data, True)
else:
new_columns = self.columns
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
DataManager containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
index = self.columns if axis else self.index
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
def sort_index_builder(df, **kwargs):
if axis:
df.columns = index
else:
df.index = index
return df.sort_index(axis=axis, **kwargs)
func = self._prepare_method(sort_index_builder, **kwargs)
new_data = self.map_across_full_axis(axis, func)
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
return self.__constructor__(
new_data, new_index, new_columns, self.dtypes.copy()
)
# END Map across rows/columns
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def map_across_full_axis_select_indices(
self, axis, func, indices, keep_remaining=False
):
"""Maps function to select indices along full axis.
Args:
axis: 0 for columns and 1 for rows.
            func: Callable mapping function over the BlockPartitions.
indices: indices along axis to map over.
keep_remaining: True if keep indices where function was not applied.
Returns:
BaseBlockPartitions containing the result of mapping func over axis on indices.
"""
return self.data.apply_func_to_select_indices_along_full_axis(
axis, func, indices, keep_remaining
)
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis.
"""
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis:
# If along rows, then drop the nonnumeric columns, record the index, and
# take transpose. We have to do this because if we don't, the result is all
# in one column for some reason.
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
query_compiler = self.drop(columns=nonnumeric)
new_columns = query_compiler.index
numeric_indices = list(query_compiler.index.get_indexer_for(new_columns))
query_compiler = query_compiler.transpose()
kwargs.pop("axis")
else:
query_compiler = self
numeric_indices = list(self.columns.get_indexer_for(new_columns))
def quantile_builder(df, internal_indices=[], **kwargs):
return pandas.DataFrame.quantile(df, **kwargs)
func = self._prepare_method(quantile_builder, **kwargs)
q_index = pandas.Float64Index(q)
new_data = query_compiler.map_across_full_axis_select_indices(
0, func, numeric_indices
)
return self.__constructor__(new_data, q_index, new_columns)
# END Map across rows/columns
# Head/Tail/Front/Back
def head(self, n):
"""Returns the first n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the first n rows of the original DataManager.
"""
# We grab the front if it is transposed and flag as transposed so that
# we are not physically updating the data from this manager. This
# allows the implementation to stay modular and reduces data copying.
if n < 0:
n = max(0, len(self.index) + n)
if self._is_transposed:
# Transpose the blocks back to their original orientation first to
# ensure that we extract the correct data on each node. The index
# on a transposed manager is already set to the correct value, so
# we need to only take the head of that instead of re-transposing.
result = self.__constructor__(
self.data.transpose().take(1, n).transpose(),
self.index[:n],
self.columns,
self._dtype_cache,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
)
return result
def tail(self, n):
"""Returns the last n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the last n rows of the original DataManager.
"""
# See head for an explanation of the transposed behavior
if n < 0:
n = max(0, len(self.index) + n)
if n == 0:
index = pandas.Index([])
else:
index = self.index[-n:]
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(1, -n).transpose(),
index,
self.columns,
self._dtype_cache,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(0, -n), index, self.columns, self._dtype_cache
)
return result
def front(self, n):
"""Returns the first n columns.
Args:
n: Integer containing the number of columns to return.
Returns:
DataManager containing the first n columns of the original DataManager.
"""
new_dtypes = (
self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]
)
# See head for an explanation of the transposed behavior
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(0, n).transpose(),
self.index,
self.columns[:n],
new_dtypes,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(1, n), self.index, self.columns[:n], new_dtypes
)
return result
def back(self, n):
"""Returns the last n columns.
Args:
n: Integer containing the number of columns to return.
Returns:
DataManager containing the last n columns of the original DataManager.
"""
new_dtypes = (
self._dtype_cache if self._dtype_cache is None else self._dtype_cache[-n:]
)
# See head for an explanation of the transposed behavior
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(0, -n).transpose(),
self.index,
self.columns[-n:],
new_dtypes,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(1, -n), self.index, self.columns[-n:], new_dtypes
)
return result
    # END Head/Tail/Front/Back
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object.
"""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To/From Pandas
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
dtype_dict = {
col_name: pandas.Series(dtype=self.dtypes[col_name])
for col_name in self.columns
}
df = pandas.DataFrame(dtype_dict, self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
@classmethod
def from_pandas(cls, df, block_partitions_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManger object to convert the DataFrame to.
df: Pandas DataFrame object.
            block_partitions_cls: BlockPartitions object used to store the partitions.
Returns:
Returns DataManager containing data from the Pandas DataFrame.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
# __getitem__ methods
def getitem_single_key(self, key):
"""Get item for a single target index.
Args:
key: Target index by which to retrieve data.
Returns:
A new PandasDataManager.
"""
new_data = self.getitem_column_array([key])
if len(self.columns.get_indexer_for([key])) > 1:
return new_data
else:
# This is the case that we are returning a single Series.
            # We do this post-processing because everything is treated as a list
# from here on, and that will result in a DataFrame.
return new_data.to_pandas()[key]
def getitem_column_array(self, key):
"""Get column data for target labels.
Args:
key: Target labels by which to retrieve data.
Returns:
A new PandasDataManager.
"""
# Convert to list for type checking
numeric_indices = list(self.columns.get_indexer_for(key))
# Internal indices is left blank and the internal
# `apply_func_to_select_indices` will do the conversion and pass it in.
def getitem(df, internal_indices=[]):
return df.iloc[:, internal_indices]
result = self.data.apply_func_to_select_indices(
0, getitem, numeric_indices, keep_remaining=False
)
# We can't just set the columns to key here because there may be
# multiple instances of a key.
new_columns = self.columns[numeric_indices]
new_dtypes = self.dtypes[numeric_indices]
return self.__constructor__(result, self.index, new_columns, new_dtypes)
def getitem_row_array(self, key):
"""Get row data for target labels.
Args:
key: Target numeric indices by which to retrieve data.
Returns:
A new PandasDataManager.
"""
# Convert to list for type checking
key = list(key)
def getitem(df, internal_indices=[]):
return df.iloc[internal_indices]
result = self.data.apply_func_to_select_indices(
1, getitem, key, keep_remaining=False
)
# We can't just set the index to key here because there may be multiple
# instances of a key.
new_index = self.index[key]
return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
# END __getitem__ methods
# __delitem__ and drop
# These will change the shape of the resulting data.
def delitem(self, key):
return self.drop(columns=[key])
def drop(self, index=None, columns=None):
"""Remove row data for target index and columns.
Args:
index: Target index to drop.
columns: Target columns to drop.
Returns:
A new PandasDataManager.
"""
if index is None:
new_data = self.data
new_index = self.index
else:
def delitem(df, internal_indices=[]):
return df.drop(index=df.index[internal_indices])
numeric_indices = list(self.index.get_indexer_for(index))
new_data = self.data.apply_func_to_select_indices(
1, delitem, numeric_indices, keep_remaining=True
)
# We can't use self.index.drop with duplicate keys because in Pandas
# it throws an error.
new_index = self.index[~self.index.isin(index)]
if columns is None:
new_columns = self.columns
new_dtypes = self.dtypes
else:
def delitem(df, internal_indices=[]):
return df.drop(columns=df.columns[internal_indices])
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = new_data.apply_func_to_select_indices(
0, delitem, numeric_indices, keep_remaining=True
)
new_columns = self.columns[~self.columns.isin(columns)]
new_dtypes = self.dtypes.drop(columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
# END __delitem__ and drop
# Insert
# This method changes the shape of the resulting data. In Pandas, this
# operation is always inplace, but this object is immutable, so we just
# return a new one from here and let the front end handle the inplace
# update.
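    # Hedged sketch of the intended calling pattern (the exact front-end hook
    # may differ): the API layer would swap in the returned manager, e.g.
    # `self._data_manager = self._data_manager.insert(loc, column, value)`,
    # so the user-facing DataFrame appears to be modified in place.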
def insert(self, loc, column, value):
"""Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new PandasQueryCompiler with new data inserted.
"""
if is_list_like(value):
from modin.pandas.series import SeriesView
if isinstance(value, (pandas.Series, SeriesView)):
value = value.reindex(self.index)
value = list(value)
def insert(df, internal_indices=[]):
internal_idx = int(internal_indices[0])
old_index = df.index
df.index = pandas.RangeIndex(len(df.index))
df.insert(internal_idx, internal_idx, value, allow_duplicates=True)
df.columns = pandas.RangeIndex(len(df.columns))
df.index = old_index
return df
new_data = self.data.apply_func_to_select_indices_along_full_axis(
0, insert, loc, keep_remaining=True
)
new_columns = self.columns.insert(loc, column)
return self.__constructor__(new_data, self.index, new_columns)
# END Insert
# UDF (apply and agg) methods
# There is a wide range of behaviors that are supported, so a lot of the
# logic can get a bit convoluted.
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if callable(func):
return self._callable_func(func, axis, *args, **kwargs)
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
else:
pass
def _post_process_apply(self, result_data, axis, try_scale=True):
"""Recompute the index after applying function.
Args:
result_data: a BaseBlockPartitions object.
axis: Target axis along which function was applied.
Returns:
A new PandasQueryCompiler.
"""
if try_scale:
try:
internal_index = self.compute_index(0, result_data, True)
except IndexError:
internal_index = self.compute_index(0, result_data, False)
try:
internal_columns = self.compute_index(1, result_data, True)
except IndexError:
internal_columns = self.compute_index(1, result_data, False)
else:
internal_index = self.compute_index(0, result_data, False)
internal_columns = self.compute_index(1, result_data, False)
if not axis:
index = internal_index
# We check if the two columns are the same length because if
# they are the same length, `self.columns` is the correct index.
# However, if the operation resulted in a different number of columns,
# we must use the derived columns from `self.compute_index()`.
if len(internal_columns) != len(self.columns):
columns = internal_columns
else:
columns = self.columns
else:
            # See the explanation above for checking the lengths of the columns.
if len(internal_index) != len(self.index):
index = internal_index
else:
index = self.index
columns = internal_columns
# `apply` and `aggregate` can return a Series or a DataFrame object,
# and since we need to handle each of those differently, we have to add
# this logic here.
if len(columns) == 0:
series_result = result_data.to_pandas(False)
if not axis and len(series_result) == len(self.columns):
index = self.columns
elif axis and len(series_result) == len(self.index):
index = self.index
series_result.index = index
return series_result
return self.__constructor__(result_data, index, columns)
def _dict_func(self, func, axis, *args, **kwargs):
"""Apply function to certain indices across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if "axis" not in kwargs:
kwargs["axis"] = axis
if axis == 0:
index = self.columns
else:
index = self.index
func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}
def dict_apply_builder(df, func_dict={}):
return df.apply(func_dict, *args, **kwargs)
result_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, dict_apply_builder, func, keep_remaining=False
)
full_result = self._post_process_apply(result_data, axis)
# The columns can get weird because we did not broadcast them to the
# partitions and we do not have any guarantee that they are correct
# until here. Fortunately, the keys of the function will tell us what
# the columns are.
if isinstance(full_result, pandas.Series):
full_result.index = [self.columns[idx] for idx in func]
return full_result
def _list_like_func(self, func, axis, *args, **kwargs):
"""Apply list-like function across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
func_prepared = self._prepare_method(lambda df: df.apply(func, *args, **kwargs))
new_data = self.map_across_full_axis(axis, func_prepared)
# When the function is list-like, the function names become the index
new_index = [f if isinstance(f, string_types) else f.__name__ for f in func]
return self.__constructor__(new_data, new_index, self.columns)
def _callable_func(self, func, axis, *args, **kwargs):
"""Apply callable functions across given axis.
Args:
func: The functions to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
def callable_apply_builder(df, func, axis, index, *args, **kwargs):
if not axis:
df.index = index
df.columns = pandas.RangeIndex(len(df.columns))
else:
df.columns = index
df.index = pandas.RangeIndex(len(df.index))
result = df.apply(func, axis=axis, *args, **kwargs)
return result
index = self.index if not axis else self.columns
func_prepared = self._prepare_method(
lambda df: callable_apply_builder(df, func, axis, index, *args, **kwargs)
)
result_data = self.map_across_full_axis(axis, func_prepared)
return self._post_process_apply(result_data, axis)
# END UDF
# Manual Partitioning methods (e.g. merge, groupby)
# These methods require some sort of manual partitioning due to their
# nature. They require certain data to exist on the same partition, and
# after the shuffle, there should be only a local map required.
def _manual_repartition(self, axis, repartition_func, **kwargs):
"""This method applies all manual partitioning functions.
Args:
axis: The axis to shuffle data along.
repartition_func: The function used to repartition data.
Returns:
A `BaseBlockPartitions` object.
"""
func = self._prepare_method(repartition_func, **kwargs)
return self.data.manual_shuffle(axis, func)
def groupby_agg(self, by, axis, agg_func, groupby_args, agg_args):
remote_index = self.index if not axis else self.columns
def groupby_agg_builder(df):
if not axis:
df.index = remote_index
else:
df.columns = remote_index
grouped_df = df.groupby(by=by, axis=axis, **groupby_args)
try:
return agg_func(grouped_df, **agg_args)
# This happens when the partition is filled with non-numeric data and a
# numeric operation is done. We need to build the index here to avoid issues
# with extracting the index.
except DataError:
return pandas.DataFrame(index=grouped_df.count().index)
func_prepared = self._prepare_method(lambda df: groupby_agg_builder(df))
result_data = self.map_across_full_axis(axis, func_prepared)
return self._post_process_apply(result_data, axis, try_scale=False)
# END Manual Partitioning methods
def get_dummies(self, columns, **kwargs):
"""Convert categorical variables to dummy variables for certain columns.
Args:
columns: The columns to convert.
Returns:
A new PandasDataManager.
"""
cls = type(self)
# `columns` as None does not mean all columns, by default it means only
# non-numeric columns.
if columns is None:
columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
# If we aren't computing any dummies, there is no need for any
# remote compute.
if len(columns) == 0:
return self.copy()
elif not is_list_like(columns):
columns = [columns]
# We have to do one of two things in order to ensure the final columns
# are correct. Our first option is to map over the data and assign the
# columns in a separate pass. That is what we have chosen to do here.
# This is not as efficient, but it requires less information from the
# lower layers and does not break any of our internal requirements. The
# second option is that we assign the columns as a part of the
# `get_dummies` call. This requires knowledge of the length of each
# partition, and breaks some of our assumptions and separation of
# concerns.
def set_columns(df, columns):
df.columns = columns
return df
set_cols = self.columns
columns_applied = self.map_across_full_axis(
1, lambda df: set_columns(df, set_cols)
)
# In some cases, we are mapping across all of the data. It is more
# efficient if we are mapping over all of the data to do it this way
# than it would be to reuse the code for specific columns.
if len(columns) == len(self.columns):
def get_dummies_builder(df):
if df is not None:
if not df.empty:
return pandas.get_dummies(df, **kwargs)
else:
return pandas.DataFrame([])
func = self._prepare_method(lambda df: get_dummies_builder(df))
new_data = columns_applied.map_across_full_axis(0, func)
untouched_data = None
else:
def get_dummies_builder(df, internal_indices=[]):
return pandas.get_dummies(
df.iloc[:, internal_indices], columns=None, **kwargs
)
numeric_indices = list(self.columns.get_indexer_for(columns))
new_data = columns_applied.apply_func_to_select_indices_along_full_axis(
0, get_dummies_builder, numeric_indices, keep_remaining=False
)
untouched_data = self.drop(columns=columns)
# Since we set the columns in the beginning, we can just extract them
# here. There is fortunately no required extra steps for a correct
# column index.
final_columns = self.compute_index(1, new_data, False)
# If we mapped over all the data we are done. If not, we need to
# prepend the `new_data` with the raw data from the columns that were
# not selected.
if len(columns) != len(self.columns):
new_data = untouched_data.data.concat(1, new_data)
final_columns = untouched_data.columns.append( | pandas.Index(final_columns) | pandas.Index |
#import requests
#youtube=requests.get(youtube_trending_url)
#youtube1=youtube.text
#print(youtube.status_code)
#print(len(youtube1))
#from bs4 import BeautifulSoup
#doc = BeautifulSoup(youtube1, 'html.parser')
youtube_trending_url='https://youtube.com/trending'
#response=requests.get(youtube_trending_url)
#with open('trending.html', 'w')as f:
# f.write(response.text)
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
def get_driver():
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(options=chrome_options)
return driver
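# Note: webdriver.Chrome needs a compatible ChromeDriver; newer Selenium
# releases can fetch one automatically via Selenium Manager, while older
# releases assume the driver binary is already on PATH.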
def get_videos(driver):
driver.get(youtube_trending_url)
videos=driver.find_elements(By.TAG_NAME,'ytd-video-renderer')
    print(f'Found {len(videos)} trending videos')
return videos
def parse_video(video):
title_tag=video.find_element(By.ID,'video-title')
title=title_tag.text
video_url=title_tag.get_attribute('href')
thumbnail_tag=video.find_element(By.TAG_NAME,'img')
thumbnail_url=thumbnail_tag.get_attribute('src')
views=video.find_element(By.XPATH,'//*[@id="metadata-line"]/span[1]')
views=views.text
upload=video.find_element(By.XPATH,'//*[@id="metadata-line"]/span[2]')
upload=upload.text
return{
'title':title,
'video_url':video_url,
'thumbnail_url':thumbnail_url,
'views':views,
'upload time':upload,
}
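# The ID/XPath selectors above depend on YouTube's current trending-page
# markup and may need updating if the layout changes.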
if __name__=="__main__":
    print('Creating driver')
driver = get_driver()
    print('Getting video list')
videos=get_videos(driver)
    print('Parsing videos')
videos_data=[parse_video(video) for video in videos[:10]]
videos_df= | pd.DataFrame(videos_data) | pandas.DataFrame |
"""
Evaluation
----------
Evaluation metrics and plotting techniques for models.
Based on
Uber.Causal ML: A Python Package for Uplift Modeling and Causal Inference with ML. (2019).
URL:https://github.com/uber/causalml.
<NAME>. & <NAME>. (2011). Real-World Uplift Modelling with Significance-Based Uplift Trees.
Technical Report TR-2011-1, Stochastic Solutions, 2011, pp. 1-33.
<NAME>., <NAME>. & <NAME>. (2014). Mining for the truly responsive customers and prospects using
true-lift modeling: Comparison of new and existing methods. Journal of Marketing Analytics, Vol. 2,
No. 4, December 2014, pp 218–238.
<NAME>., <NAME>. & <NAME>. (2015). Ensemble methods for uplift modeling.
Data Mining and Knowledge Discovery, Vol. 29, No. 6, November 2015, pp. 1531–1559.
Note
For evaluation functions:
If the true treatment effect is provided (e.g. in synthetic data), it's calculated
as the cumulative gain of the true treatment effect in each population.
Otherwise, it's calculated as the cumulative difference between the mean outcomes
of the treatment and control groups in each population.
For the former, `treatment_effect_col` should be provided. For the latter, both
`outcome_col` and `treatment_col` should be provided.
Contents
plot_eval,
get_cum_effect,
get_cum_gain,
get_qini,
plot_cum_effect,
plot_cum_gain,
plot_qini,
auuc_score,
qini_score,
get_batch_metrics,
plot_batch_metrics,
plot_batch_effects (WIP),
plot_batch_gains (WIP),
plot_batch_qinis (WIP),
plot_batch_responses,
signal_to_noise,
iterate_model,
eval_table
"""
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm.auto import tqdm
RANDOM_COL = "random"
def plot_eval(
df,
kind=None,
n=100,
percent_of_pop=False,
normalize=False,
figsize=(15, 5),
fontsize=20,
axis=None,
legend_metrics=None,
*args,
**kwargs,
):
"""
Plots one of the effect/gain/qini charts of model estimates.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and unit outcomes as columns.
kind : str : optional (default='gain')
The kind of plot to draw: 'effect,' 'gain,' and 'qini' are supported.
n : int, optional (default=100)
The number of samples to be used for plotting.
percent_of_pop : bool : optional (default=False)
Whether the X-axis is displayed as a percent of the whole population.
normalize : bool : for inheritance (default=False)
Passes this argument to interior functions directly.
figsize : tuple : optional
Allows for quick changes of figures sizes.
fontsize : int or float : optional (default=20)
The font size of the plots, with all labels scaled accordingly.
axis : str : optional (default=None)
Adds an axis to the plot so they can be combined.
legend_metrics : bool : optional (default=True)
Calculate AUUC or Qini metrics to add to the plot legend for gain and qini respectively.
"""
# Add ability to have straight random targeting line.
catalog = {"effect": get_cum_effect, "gain": get_cum_gain, "qini": get_qini}
assert kind in catalog, (
f"{kind} for plot_eval is not implemented. Select one of "
+ ", ".join(list(catalog.keys()))
) + "."
# Pass one of the plot types and its arguments.
df_metrics = catalog[kind](df=df, normalize=normalize, *args, **kwargs)
if (n is not None) and (n < df_metrics.shape[0]):
df_metrics = df_metrics.iloc[
np.linspace(start=0, stop=df_metrics.index[-1], num=n, endpoint=True)
]
# Adaptable figure features.
if figsize:
sns.set(rc={"figure.figsize": figsize})
# Shifts the color palette such that models are the same color across
# line and batch plots.
# Random line is the first in line plots, such that it's solid.
sns.set_palette("deep") # Default
color_palette = sns.color_palette()
color_palette.insert(0, color_palette.pop())
sns.set_palette(color_palette)
ax = sns.lineplot(data=df_metrics, ax=axis)
if legend_metrics:
if kind == "gain":
metric_label = "auuc"
metrics = auuc_score(df=df, normalize=normalize, *args, **kwargs)
elif kind == "qini":
metric_label = "qini"
metrics = qini_score(df=df, normalize=normalize, *args, **kwargs)
elif kind == "effect":
print(
"Display metrics are AUUC or Qini, and are thus not supported for Incremental Effect Plots."
)
print("The plot will be done without them.")
legend_metrics = False # Turn off for next line
if legend_metrics:
metric_labels = ["{}: {:.4f}".format(metric_label, m) for m in metrics]
metric_labels[0] = "" # random column
new_labels = list(df_metrics.columns) + metric_labels
ax.legend(title="Models", labels=new_labels, ncol=2)
else:
ax.legend(title="Models")
plot_x_label = "Population Targeted"
if percent_of_pop:
plot_x_label += " (%)"
ax.xaxis.set_major_formatter(mtick.PercentFormatter(xmax=df.shape[0]))
ax.set_xlabel(plot_x_label, fontsize=fontsize)
ax.set_ylabel("Cumulative Incremental Change", fontsize=fontsize)
plot_title = "Incremental {}".format(kind.title())
if normalize and kind in ["gain", "qini"]:
plot_title += " (Normalized)"
ax.axes.set_title(plot_title, fontsize=fontsize * 1.5)
def get_cum_effect(
df,
models=None,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
):
"""
Gets average causal effects of model estimates in cumulative population.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : not implemented (default=False)
        For consistency with gain and qini.
random_seed : int, optional (default=None)
Random seed for numpy.random.rand().
Returns
-------
effects : pandas.DataFrame
Average causal effects of model estimates in cumulative population.
"""
assert (
(outcome_col in df.columns)
and (treatment_col in df.columns)
or treatment_effect_col in df.columns
), "Either the outcome_col and treatment_col arguments must be provided, or the treatment_effect_col argument."
df = df.copy()
np.random.seed(random_seed)
random_cols = []
for i in range(10):
random_col = "__random_{}__".format(i)
        # Generate random values in [0, 1) to compare against on average.
df[random_col] = np.random.rand(df.shape[0])
random_cols.append(random_col)
if isinstance(models, str):
models = [models]
model_and_random_preds = [x for x in df.columns if x in models + random_cols]
effects = []
for col in model_and_random_preds:
# Sort by model estimates, and get the cumulative sum of treatment
# along the new sorted axis.
df = df.sort_values(col, ascending=False).reset_index(drop=True)
df.index = df.index + 1
df["cumsum_treatment"] = df[treatment_col].cumsum()
if treatment_effect_col in df.columns:
# Calculate iterated average treatment effects of simulated data.
iterated_effect = df[treatment_effect_col].cumsum() / df.index
else:
# Calculate iterated average treatment effects using unit outcomes.
df["cumsum_control"] = df.index.values - df["cumsum_treatment"]
df["cumsum_y_treatment"] = (df[outcome_col] * df[treatment_col]).cumsum()
df["cumsum_y_control"] = (
df[outcome_col] * (1 - df[treatment_col])
).cumsum()
iterated_effect = (
df["cumsum_y_treatment"] / df["cumsum_treatment"]
- df["cumsum_y_control"] / df["cumsum_control"]
)
effects.append(iterated_effect)
effects = pd.concat(effects, join="inner", axis=1)
effects.loc[0] = np.zeros((effects.shape[1],)) # start from 0
effects = effects.sort_index().interpolate()
effects.columns = model_and_random_preds
effects[RANDOM_COL] = effects[random_cols].mean(axis=1)
effects.drop(random_cols, axis=1, inplace=True)
cols = effects.columns.tolist()
cols.insert(0, cols.pop(cols.index(RANDOM_COL)))
effects = effects.reindex(columns=cols)
return effects
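# Minimal usage sketch (hedged; assumes `df` holds a binary treatment column
# "w", an outcome column "y", and a model-estimate column named "model_a"):
#
#   effects = get_cum_effect(df, models=["model_a"], outcome_col="y", treatment_col="w")
#   effects.plot()  # cumulative average effect vs. number of units targeted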
def get_cum_gain(
df,
models=None,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
):
"""
Gets cumulative gains of model estimates in population.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : optional (default=False)
Whether to normalize the y-axis to 1 or not.
random_seed : int, optional (default=None)
Random seed for numpy.random.rand().
Returns
-------
gains : pandas.DataFrame
Cumulative gains of model estimates in population.
"""
effects = get_cum_effect(
df=df,
models=models,
outcome_col=outcome_col,
treatment_col=treatment_col,
treatment_effect_col=treatment_effect_col,
random_seed=random_seed,
)
# Cumulative gain is the cumulative causal effect of the population.
gains = effects.mul(effects.index.values, axis=0)
if normalize:
gains = gains.div(np.abs(gains.iloc[-1, :]), axis=1)
return gains
def get_qini(
df,
models=None,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
):
"""
Gets Qini of model estimates in population.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : optional (default=False)
Whether to normalize the y-axis to 1 or not.
random_seed : int, optional (default=None)
Random seed for numpy.random.rand().
Returns
-------
qinis : pandas.DataFrame
Qini of model estimates in population.
"""
assert (
(outcome_col in df.columns)
and (treatment_col in df.columns)
or treatment_effect_col in df.columns
), "Either the outcome_col and treatment_col arguments must be provided, or the treatment_effect_col argument."
df = df.copy()
np.random.seed(random_seed)
random_cols = []
for i in range(10):
random_col = "__random_{}__".format(i)
        # Generate random values in [0, 1) to compare against on average.
df[random_col] = np.random.rand(df.shape[0])
random_cols.append(random_col)
if isinstance(models, str):
models = [models]
model_and_random_preds = [x for x in df.columns if x in models + random_cols]
qinis = []
for col in model_and_random_preds:
# Sort by model estimates, and get the cumulative sum of treatment
# along the new sorted axis.
df = df.sort_values(col, ascending=False).reset_index(drop=True)
df.index = df.index + 1
df["cumsum_treatment"] = df[treatment_col].cumsum()
if treatment_effect_col in df.columns:
# Calculate iterated average treatment effects of simulated data.
iterated_effect = (
df[treatment_effect_col].cumsum() / df.index * df["cumsum_treatment"]
)
else:
# Calculate iterated average treatment effects using unit outcomes.
df["cumsum_control"] = df.index.values - df["cumsum_treatment"]
df["cumsum_y_treatment"] = (df[outcome_col] * df[treatment_col]).cumsum()
df["cumsum_y_control"] = (
df[outcome_col] * (1 - df[treatment_col])
).cumsum()
iterated_effect = (
df["cumsum_y_treatment"]
- df["cumsum_y_control"] * df["cumsum_treatment"] / df["cumsum_control"]
)
qinis.append(iterated_effect)
qinis = pd.concat(qinis, join="inner", axis=1)
qinis.loc[0] = np.zeros((qinis.shape[1],)) # start from 0
qinis = qinis.sort_index().interpolate()
qinis.columns = model_and_random_preds
qinis[RANDOM_COL] = qinis[random_cols].mean(axis=1)
qinis.drop(random_cols, axis=1, inplace=True)
cols = qinis.columns.tolist()
cols.insert(0, cols.pop(cols.index(RANDOM_COL)))
qinis = qinis.reindex(columns=cols)
if normalize:
qinis = qinis.div(np.abs(qinis.iloc[-1, :]), axis=1)
return qinis
def plot_cum_effect(
df,
n=100,
models=None,
percent_of_pop=False,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
random_seed=None,
figsize=None,
fontsize=20,
axis=None,
legend_metrics=None,
):
"""
Plots the causal effect chart of model estimates in cumulative population.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
kind : effect
The kind of plot to draw
n : int, optional (default=100)
The number of samples to be used for plotting.
models : list
A list of models corresponding to estimated treatment effect columns.
percent_of_pop : bool : optional (default=False)
Whether the X-axis is displayed as a percent of the whole population.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
random_seed : int, optional (default=None)
Random seed for numpy.random.rand().
figsize : tuple : optional
Allows for quick changes of figures sizes.
fontsize : int or float : optional (default=20)
The font size of the plots, with all labels scaled accordingly.
axis : str : optional (default=None)
Adds an axis to the plot so they can be combined.
legend_metrics : bool : optional (default=False)
Not supported for plot_cum_effect - the user will be notified.
Returns
-------
A plot of the cumulative effects of all models in df.
"""
plot_eval(
df=df,
kind="effect",
n=n,
models=models,
percent_of_pop=percent_of_pop,
outcome_col=outcome_col,
treatment_col=treatment_col,
treatment_effect_col=treatment_effect_col,
random_seed=random_seed,
figsize=figsize,
fontsize=fontsize,
axis=axis,
legend_metrics=legend_metrics,
)
def plot_cum_gain(
df,
n=100,
models=None,
percent_of_pop=False,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
figsize=None,
fontsize=20,
axis=None,
legend_metrics=True,
):
"""
Plots the cumulative gain chart (or uplift curve) of model estimates.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
kind : gain
The kind of plot to draw
n : int, optional (default=100)
The number of samples to be used for plotting.
models : list
A list of models corresponding to estimated treatment effect columns.
percent_of_pop : bool : optional (default=False)
Whether the X-axis is displayed as a percent of the whole population.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : optional (default=False)
Whether to normalize the y-axis to 1 or not.
random_seed : int, optional (default=None)
Random seed for numpy.random.rand().
figsize : tuple : optional
Allows for quick changes of figures sizes.
fontsize : int or float : optional (default=20)
The font size of the plots, with all labels scaled accordingly.
axis : str : optional (default=None)
Adds an axis to the plot so they can be combined.
legend_metrics : bool : optional (default=True)
Calculates AUUC metrics to add to the plot legend.
Returns
-------
A plot of the cumulative gains of all models in df.
"""
plot_eval(
df=df,
kind="gain",
n=n,
models=models,
percent_of_pop=percent_of_pop,
outcome_col=outcome_col,
treatment_col=treatment_col,
treatment_effect_col=treatment_effect_col,
normalize=normalize,
random_seed=random_seed,
figsize=figsize,
fontsize=fontsize,
axis=axis,
legend_metrics=legend_metrics,
)
def plot_qini(
df,
n=100,
models=None,
percent_of_pop=False,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
figsize=None,
fontsize=20,
axis=None,
legend_metrics=True,
):
"""
Plots the Qini chart (or uplift curve) of model estimates.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
kind : qini
The kind of plot to draw
n : int, optional (default=100)
The number of samples to be used for plotting.
models : list
A list of models corresponding to estimated treatment effect columns.
percent_of_pop : bool : optional (default=False)
Whether the X-axis is displayed as a percent of the whole population.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : optional (default=False)
Whether to normalize the y-axis to 1 or not.
random_seed : int, optional (default=None)
Random seed for numpy.random.rand().
figsize : tuple : optional
Allows for quick changes of figures sizes.
fontsize : int or float : optional (default=20)
The font size of the plots, with all labels scaled accordingly.
axis : str : optional (default=None)
Adds an axis to the plot so they can be combined.
legend_metrics : bool : optional (default=True)
Calculates Qini metrics to add to the plot legend.
Returns
-------
A plot of the qini curves of all models in df.
"""
plot_eval(
df=df,
kind="qini",
n=n,
models=models,
percent_of_pop=percent_of_pop,
outcome_col=outcome_col,
treatment_col=treatment_col,
treatment_effect_col=treatment_effect_col,
normalize=normalize,
random_seed=random_seed,
figsize=figsize,
fontsize=fontsize,
axis=axis,
legend_metrics=legend_metrics,
)
def auuc_score(
df,
models=None,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
):
"""
Calculates the AUUC score (Gini): the Area Under the Uplift Curve.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and actual data as columns.
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : optional (default=False)
Whether to normalize the y-axis to 1 or not.
random_seed : int, for inheritance (default=None)
Random seed for numpy.random.rand().
Returns
-------
AUUC score : float
"""
gains = get_cum_gain(
df=df,
models=models,
outcome_col=outcome_col,
treatment_col=treatment_col,
treatment_effect_col=treatment_effect_col,
normalize=normalize,
)
return gains.sum() / gains.shape[0]
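# Hedged example (same assumed columns as above): a model whose AUUC is well
# above the value in the "random" column ranks units better than random
# targeting.
#
#   auuc = auuc_score(df, models=["model_a"], outcome_col="y", treatment_col="w")
#   print(auuc["model_a"], auuc["random"])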
def qini_score(
df,
models=None,
outcome_col="y",
treatment_col="w",
treatment_effect_col="tau",
normalize=False,
random_seed=None,
):
"""
Calculates the Qini score: the area between the Qini curve of a model and random assignment.
Parameters
----------
df : pandas.DataFrame)
A data frame with model estimates and actual data as columns
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
treatment_effect_col : str : optional (default=tau)
The column name for the true treatment effect.
normalize : bool : optional (default=False)
Whether to normalize the y-axis to 1 or not.
random_seed : int, for inheritance (default=None)
Random seed for numpy.random.rand().
Returns
-------
Qini score : float
"""
qinis = get_qini(
df=df,
models=models,
outcome_col=outcome_col,
treatment_col=treatment_col,
treatment_effect_col=treatment_effect_col,
normalize=normalize,
)
return (qinis.sum(axis=0) - qinis[RANDOM_COL].sum()) / qinis.shape[0]
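# Hedged example: the random curve is subtracted inside qini_score, so scores
# near zero indicate no uplift over random targeting.
#
#   qini = qini_score(df, models=["model_a"], outcome_col="y", treatment_col="w")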
def get_batches(df, n=10, models=None, outcome_col="y", treatment_col="w"):
"""
Calculates the cumulative causal effects of models given batches from ranked treatment effects.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and unit outcomes as columns.
n : int, optional (default=10, deciles; 5, quintiles also standard)
The number of batches to split the units into.
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
Returns
-------
df : pandas.DataFrame
The original dataframe with columns for model rank batches given n.
"""
assert (
np.isin(df[outcome_col].unique(), [0, 1]).all()
and np.isin(df[treatment_col].unique(), [0, 1]).all()
), "Batch metrics are currently only available for numeric-binary outcomes."
model_preds = [x for x in df.columns if x in models]
for col in model_preds:
df = df.sort_values(col, ascending=False).reset_index(drop=True)
df_batches = np.array_split(df, n)
# Get sublists of the length of the batch filled with the batch indexes.
sublist_of_batch_indexes = [
[i + 1 for j in range(len(b))] for i, b in enumerate(df_batches)
]
# Assign batches to units.
df["{}_batches".format(col)] = [
val for sublist in sublist_of_batch_indexes for val in sublist
]
return df
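# Hedged example: with n=10 this adds a "model_a_batches" column whose values
# run from 1 (highest estimated effects) to 10 (lowest), assuming a model
# column named "model_a".
#
#   df_batched = get_batches(df, n=10, models=["model_a"], outcome_col="y", treatment_col="w")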
def plot_batch_metrics(
df,
kind=None,
n=10,
models=None,
outcome_col="y",
treatment_col="w",
normalize=False,
figsize=(15, 5),
fontsize=20,
axis=None,
*args,
**kwargs,
):
"""
Plots the batch chart: the cumulative batch metrics predicted by a model given ranked treatment effects.
Parameters
----------
df : pandas.DataFrame
A data frame with model estimates and unit outcomes as columns.
kind : str : optional (default='gain')
The kind of plot to draw: 'effect,' 'gain,' 'qini,' and 'response' are supported.
    n : int, optional (default=10, deciles; 5, quintiles also standard)
The number of batches to split the units into.
models : list
A list of models corresponding to estimated treatment effect columns.
outcome_col : str : optional (default=y)
The column name for the actual outcome.
treatment_col : str : optional (default=w)
The column name for the treatment indicator (0 or 1).
figsize : tuple : optional
Allows for quick changes of figures sizes.
fontsize : int or float : optional (default=20)
The font size of the plots, with all labels scaled accordingly.
axis : str : optional (default=None)
Adds an axis to the plot so they can be combined.
Returns
-------
A plot of batch metrics of all models in df.
"""
catalog = {
"effect": get_cum_effect,
"gain": get_cum_gain,
"qini": get_qini,
"response": None,
}
assert kind in catalog, (
f"{kind} for plot_batch_metrics is not implemented. Select one of "
+ ", ".join(list(catalog.keys()))
+ "."
)
df_batches = get_batches(
df=df, n=n, models=models, outcome_col=outcome_col, treatment_col=treatment_col
)
df_batch_metrics = pd.DataFrame()
if kind in ["effect", "gain", "qini"]:
for i in range(n + 1)[1:]: # From 1 through n
batch_metrics = pd.DataFrame()
for model in models:
effect_metrics = catalog[kind](
df=df_batches[df_batches["{}_batches".format(model)] == i],
models=model,
outcome_col=outcome_col,
treatment_col=treatment_col,
normalize=normalize,
*args,
**kwargs,
)
if kind == "effect":
# Select last row, the cumsum effect for the model batch,
# make a df and transpose.
df_effect_metrics = pd.DataFrame(effect_metrics.iloc[-1, :]).T
batch_metrics = pd.concat(
[batch_metrics, df_effect_metrics], axis=1
)
elif kind == "gain":
# Cumulative gain is the cumulative causal effect of the population.
gain_metrics = effect_metrics.mul(
effect_metrics.index.values, axis=0
)
if normalize:
gain_metrics = gain_metrics.div(
np.abs(gain_metrics.iloc[-1, :]), axis=1
)
gain_metrics = gain_metrics.sum() / gain_metrics.shape[0]
# Make a df and transpose to a row for concatenation.
df_gain_metrics = pd.DataFrame(gain_metrics).T
batch_metrics = pd.concat([batch_metrics, df_gain_metrics], axis=1)
elif kind == "qini":
qini_metrics = (
effect_metrics.sum(axis=0) - effect_metrics[RANDOM_COL].sum()
) / effect_metrics.shape[0]
# Make a df and transpose to a row for concatenation.
df_qini_metrics = | pd.DataFrame(qini_metrics) | pandas.DataFrame |
import pandas as pd
import pytz
import datetime
from sqlalchemy.types import *
def convert_result_to_df(data):
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from calh.visualization import Heatmap
from . import CURR_DIR
def test_date_df_for_heatmap_from_ics_input():
hm = Heatmap(input_data=CURR_DIR / "data" / "ics" / "02-04_05-05-2020_urlab.ics")
expected_date_df = pd.DataFrame(
[
{"name": 1, "weekday": 1, "date": "2020-02-04", "week": 6, "month": 2},
{"name": 1, "weekday": 4, "date": "2020-02-07", "week": 6, "month": 2},
{"name": 1, "weekday": 3, "date": "2020-02-13", "week": 7, "month": 2},
{"name": 1, "weekday": 0, "date": "2020-02-17", "week": 8, "month": 2},
{"name": 1, "weekday": 1, "date": "2020-02-18", "week": 8, "month": 2},
{"name": 1, "weekday": 1, "date": "2020-02-25", "week": 9, "month": 2},
{"name": 1, "weekday": 0, "date": "2020-03-09", "week": 11, "month": 3},
{"name": 1, "weekday": 1, "date": "2020-03-10", "week": 11, "month": 3},
{"name": 1, "weekday": 3, "date": "2020-03-12", "week": 11, "month": 3},
{"name": 1, "weekday": 0, "date": "2020-05-04", "week": 19, "month": 5},
]
)
expected_date_df["date"] = | pd.to_datetime(expected_date_df["date"], utc=True) | pandas.to_datetime |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pypatent
def search_patent():
"""
Find Rooster cumstomers in the patent databases
Find all operators in the mesenchymal/exosome sector
Identify operators not citing Rooster
"""
print("running search_patents")
# Name searchers
searchNames = []
searchNames.append('synthetic biology') #
searchNames.append('CRISPR') #
searchNames.append('cyanobacteria') #
searchNames.append('diatom') #
searchNames.append('spirulina') #
# searchNames.append('genetic engineering') #
searchNames.append('synbio') #
df = | pd.DataFrame() | pandas.DataFrame |
from typing import Tuple, Union
import datetime
import os
from xlrd import XLRDError
import pandas as pd
def load_df(url: str, sheet_name: Union[int, str] = 0) -> Tuple[pd.DataFrame, bool]:
from_html = os.path.splitext(url)[1] in ['.htm', '.html']
# Read from input file
if from_html:
try:
sheets = pd.read_html(url, encoding='iso8859_8') # TODO: get encoding as parameter
except ValueError:
print(f'Failed parsing {url}')
raise
assert sheets
df = sheets[0]
# Make the first row a column name, and drop it
df.columns = df.iloc[0]
df = df.reindex(df.index.drop(0))
df.reset_index(inplace=True, drop=True)
else:
try:
df = pd.read_excel(url, sheet_name=sheet_name)
except XLRDError:
print('Should be parsed as HTML?')
raise
assert not df.empty
return df, from_html
def parse_input_df(
df: pd.DataFrame,
from_html: bool,
num_header_rows: int,
columns_name: str,
drop_first_header: bool = False,
num_last_rows_to_discard: int = None,
num_columns_to_keep: int = None,
column_translator: dict = None,
convert_to_numeric: bool = True
) -> Tuple[pd.DataFrame, pd.DataFrame]:
if num_columns_to_keep is not None:
df.drop(df.columns[range(num_columns_to_keep, df.shape[1])], axis=1, inplace=True)
assert not df.empty
column_translator_internal = {
'שעה': 'Time',
'תאריך': 'Date'
}
if column_translator:
column_translator_internal.update(column_translator)
# Get headers and set it as dataframe columns
df_headers = df.iloc[0:num_header_rows - 1].fillna('').transpose().reset_index(drop=True)
assert not df_headers.empty, 'No headers'
# Translate all Hebrew columns to English
df_headers[0].replace(column_translator_internal, inplace=True)
if drop_first_header:
# Drop the first header, not before saving 'Date' and 'Time' header names
# This is due to the header dataframe being in the following form:
# 0 1
# 0 Flares HHPFlare
# 1 Flares NEWFF
# 2 Flares OLDFF
# 3 CAOL Flares Flare-PP-185
# 4 CAOL Flares Flare-PP-180
# 5 CAOL Flares Flare-Monomers
# 6 Time
# 7 Date
df_headers[1] = df_headers.apply(lambda row: row[1] or row[0], axis=1)
df_headers.drop(df_headers.columns[0], axis='columns', inplace=True)
# Join multiple-line headers to a single line
columns = df_headers.apply(lambda row: row.map(str).str.cat(sep=' ').strip(), axis=1)
# Update dataframe with manipulated headers
df.columns = columns
df.columns.name = columns_name
# Move units to a separate dataframe
df_units = df.iloc[num_header_rows-1:num_header_rows].reset_index(drop=True)
df_units.columns = columns
df_units.drop(columns=['Date', 'Time'], axis=1, inplace=True)
# Discard headers and units
df.drop(df.head(num_header_rows).index, inplace=True)
# Drop last garbage rows
if num_last_rows_to_discard:
df.drop(df.tail(num_last_rows_to_discard).index, inplace=True)
# Fix bad input where midnight is '01/01/1900 0:00:00'
# Convert the time to midnight, and increment day to the next day
midnight_invalid = [datetime.datetime(1900, 1, 1, 0, 0, 0), '24:00']
midnight_valid = datetime.time()
for i in df[df['Time'].isin(midnight_invalid)].index:
df.loc[i, 'Time'] = midnight_valid
df.loc[i, 'Date'] = pd.to_datetime(df.loc[i, 'Date'], dayfirst=True) + datetime.timedelta(days=1)
df.to_csv('after_fix_midnight.csv')
# Make sure that Date and Time contain datetime values
# (it is expected to be string when using read_html instead of read_excel)
# TODO: make sure this does not corrupt dataframe read using read_html
if from_html:
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
df.to_csv('after_to_datetime.csv')
def normalize_time(x):
if isinstance(x, str):
return | pd.Timestamp(x) | pandas.Timestamp |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import is_bool_indexer
from pandas.core.indexing import check_bool_indexer
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from pandas.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_pandas, wrap_udf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _get_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_map(func_name):
def str_op_builder(df, *args, **kwargs):
str_s = df.squeeze(axis=1).str
return getattr(pandas.Series.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
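# For example, _str_map("lower") returns a function that applies
# Series.str.lower to a squeezed one-column partition and wraps the result
# back into a DataFrame.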
def _dt_prop_map(property_name):
"""
Create a function that call property of property `dt` of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze(axis=1).dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
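# For example, _dt_prop_map("hour") builds a partition-level function that
# reads Series.dt.hour and returns it as a one-column DataFrame.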
def _dt_func_map(func_name):
"""
Create a function that call method of property `dt` of the series.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies callable methods of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
dt_s = df.squeeze(axis=1).dt
return pandas.DataFrame(
getattr(pandas.Series.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
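# Used below to wrap inplace pandas operations such as DataFrame.update and
# Series.update so that the original partition data is left untouched.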
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
            k: v.to_pandas() if isinstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_get_axis(0), _set_axis(0))
columns = property(_get_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy())
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
add = BinaryFunction.register(pandas.DataFrame.add)
combine = BinaryFunction.register(pandas.DataFrame.combine)
combine_first = BinaryFunction.register(pandas.DataFrame.combine_first)
eq = BinaryFunction.register(pandas.DataFrame.eq)
floordiv = BinaryFunction.register(pandas.DataFrame.floordiv)
ge = BinaryFunction.register(pandas.DataFrame.ge)
gt = BinaryFunction.register(pandas.DataFrame.gt)
le = BinaryFunction.register(pandas.DataFrame.le)
lt = BinaryFunction.register(pandas.DataFrame.lt)
mod = BinaryFunction.register(pandas.DataFrame.mod)
mul = BinaryFunction.register(pandas.DataFrame.mul)
ne = BinaryFunction.register(pandas.DataFrame.ne)
pow = BinaryFunction.register(pandas.DataFrame.pow)
rfloordiv = BinaryFunction.register(pandas.DataFrame.rfloordiv)
rmod = BinaryFunction.register(pandas.DataFrame.rmod)
rpow = BinaryFunction.register(pandas.DataFrame.rpow)
rsub = BinaryFunction.register(pandas.DataFrame.rsub)
rtruediv = BinaryFunction.register(pandas.DataFrame.rtruediv)
sub = BinaryFunction.register(pandas.DataFrame.sub)
truediv = BinaryFunction.register(pandas.DataFrame.truediv)
__and__ = BinaryFunction.register(pandas.DataFrame.__and__)
__or__ = BinaryFunction.register(pandas.DataFrame.__or__)
__rand__ = BinaryFunction.register(pandas.DataFrame.__rand__)
__ror__ = BinaryFunction.register(pandas.DataFrame.__ror__)
__rxor__ = BinaryFunction.register(pandas.DataFrame.__rxor__)
__xor__ = BinaryFunction.register(pandas.DataFrame.__xor__)
df_update = BinaryFunction.register(
copy_df_for_func(pandas.DataFrame.update), join_type="left"
)
series_update = BinaryFunction.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Boolean query compiler giving the condition to evaluate.
other: Replacement value(s) (query compiler or scalar) used where cond is False.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_series, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(list(left_on) + list(right_on))
if is_reset_index
else new_self.sort_index(axis=0, level=list(left_on) + list(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
on = kwargs.get("on", None)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
if how in ["left", "inner"]:
right = right.to_pandas()
def map_func(left, right=right, kwargs=kwargs):
return pandas.DataFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
level = kwargs.get("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_pandas(pandas.DataFrame.reset_index, **kwargs)
if not drop:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.copy()
new_self.index = pandas.RangeIndex(len(new_self.index))
return new_self
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
PandasQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_map, is_monotonic_reduce, axis=0
)(self)
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(pandas.DataFrame.count, pandas.DataFrame.sum)
max = MapReduceFunction.register(pandas.DataFrame.max, pandas.DataFrame.max)
min = MapReduceFunction.register(pandas.DataFrame.min, pandas.DataFrame.min)
sum = MapReduceFunction.register(pandas.DataFrame.sum, pandas.DataFrame.sum)
prod = MapReduceFunction.register(pandas.DataFrame.prod, pandas.DataFrame.prod)
any = MapReduceFunction.register(pandas.DataFrame.any, pandas.DataFrame.any)
all = MapReduceFunction.register(pandas.DataFrame.all, pandas.DataFrame.all)
memory_usage = MapReduceFunction.register(
pandas.DataFrame.memory_usage,
lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
axis=0,
)
mean = MapReduceFunction.register(
lambda df, **kwargs: df.apply(
lambda x: (x.sum(skipna=kwargs.get("skipna", True)), x.count()),
axis=kwargs.get("axis", 0),
result_type="reduce",
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
lambda df, **kwargs: df.apply(
lambda x: x.apply(lambda d: d[0]).sum(skipna=kwargs.get("skipna", True))
/ x.apply(lambda d: d[1]).sum(skipna=kwargs.get("skipna", True)),
axis=kwargs.get("axis", 0),
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
)
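# The mean registration above splits the computation into a map step that emits
# per-partition (sum, count) pairs and a reduce step that divides pooled sums by pooled
# counts. A small worked illustration of the idea in plain pandas (not the distributed
# code path itself):
#     parts = [pandas.Series([1.0, 2.0]), pandas.Series([4.0])]
#     pairs = [(p.sum(), p.count()) for p in parts]               # [(3.0, 2), (4.0, 1)]
#     mean = sum(s for s, _ in pairs) / sum(c for _, c in pairs)  # 7.0 / 3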
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except (ValueError):
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series(
[df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan]
)
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
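# value_counts therefore runs in two phases: every partition computes its local counts,
# then the reduce step groups the partial results by value, sums them, re-applies the
# normalize/sort/dropna options, and stabilizes tie ordering via sort_index_for_equal_values.
# Illustrative expectation (``qc`` is an assumed one-column compiler over ["a", "b", "a"]):
#     qc.value_counts().to_pandas()   # counts: "a" -> 2, "b" -> 1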
# END MapReduce operations
# Reduction operations
idxmax = ReductionFunction.register(pandas.DataFrame.idxmax)
idxmin = ReductionFunction.register(pandas.DataFrame.idxmin)
median = ReductionFunction.register(pandas.DataFrame.median)
nunique = ReductionFunction.register(pandas.DataFrame.nunique)
skew = ReductionFunction.register(pandas.DataFrame.skew)
kurt = ReductionFunction.register(pandas.DataFrame.kurt)
sem = ReductionFunction.register(pandas.DataFrame.sem)
std = ReductionFunction.register(pandas.DataFrame.std)
var = ReductionFunction.register(pandas.DataFrame.var)
sum_min_count = ReductionFunction.register(pandas.DataFrame.sum)
prod_min_count = ReductionFunction.register(pandas.DataFrame.prod)
quantile_for_single_value = ReductionFunction.register(pandas.DataFrame.quantile)
mad = ReductionFunction.register(pandas.DataFrame.mad)
to_datetime = ReductionFunction.register(
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_func(
self, resample_args, func_name, new_columns=None, df_op=None, *args, **kwargs
):
def map_func(df, resample_args=resample_args):
if df_op is not None:
df = df_op(df)
resampled_val = df.resample(*resample_args)
op = getattr(pandas.core.resample.Resampler, func_name)
if callable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
val = op(resampled_val, *args, **kwargs)
except (ValueError):
resampled_val = df.copy().resample(*resample_args)
val = op(resampled_val, *args, **kwargs)
else:
val = getattr(resampled_val, func_name)
if isinstance(val, pandas.Series):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
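# _resample_func is the single delegation point for all resample_* methods below: it
# resamples full column partitions (axis=0) and invokes the named
# pandas.core.resample.Resampler attribute. A hedged sketch of what a wrapper effectively
# computes per frame, in plain pandas:
#     resample_args = ("1D",)
#     df.resample(*resample_args).sum()    # roughly what resample_sum maps over each frame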
def resample_get_group(self, resample_args, name, obj):
return self._resample_func(resample_args, "get_group", name=name, obj=obj)
def resample_app_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"apply",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_app_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "apply", func=func, *args, **kwargs)
def resample_agg_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"aggregate",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_agg_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args, "aggregate", func=func, *args, **kwargs
)
def resample_transform(self, resample_args, arg, *args, **kwargs):
return self._resample_func(resample_args, "transform", arg=arg, *args, **kwargs)
def resample_pipe(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "pipe", func=func, *args, **kwargs)
def resample_ffill(self, resample_args, limit):
return self._resample_func(resample_args, "ffill", limit=limit)
def resample_backfill(self, resample_args, limit):
return self._resample_func(resample_args, "backfill", limit=limit)
def resample_bfill(self, resample_args, limit):
return self._resample_func(resample_args, "bfill", limit=limit)
def resample_pad(self, resample_args, limit):
return self._resample_func(resample_args, "pad", limit=limit)
def resample_nearest(self, resample_args, limit):
return self._resample_func(resample_args, "nearest", limit=limit)
def resample_fillna(self, resample_args, method, limit):
return self._resample_func(resample_args, "fillna", method=method, limit=limit)
def resample_asfreq(self, resample_args, fill_value):
return self._resample_func(resample_args, "asfreq", fill_value=fill_value)
def resample_interpolate(
self,
resample_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_func(
resample_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_count(self, resample_args):
return self._resample_func(resample_args, "count")
def resample_nunique(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "nunique", _method=_method, *args, **kwargs
)
def resample_first(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "first", _method=_method, *args, **kwargs
)
def resample_last(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "last", _method=_method, *args, **kwargs
)
def resample_max(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "max", _method=_method, *args, **kwargs
)
def resample_mean(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_median(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_min(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "min", _method=_method, *args, **kwargs
)
def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args,
"ohlc",
df_op=lambda df: df.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "prod", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_size(self, resample_args):
return self._resample_func(resample_args, "size", new_columns=["__reduced__"])
def resample_sem(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "sem", _method=_method, *args, **kwargs
)
def resample_std(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "std", ddof=ddof, *args, **kwargs)
def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "sum", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_var(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "var", ddof=ddof, *args, **kwargs)
def resample_quantile(self, resample_args, q, **kwargs):
return self._resample_func(resample_args, "quantile", q=q, **kwargs)
window_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
window_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda df, rolling_args: pandas.DataFrame(df.rolling(*rolling_args).count())
)
rolling_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
rolling_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_min = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).min(*args, **kwargs)
)
)
rolling_max = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_apply = FoldFunction.register(
lambda df, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(*rolling_args).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda df, rolling_args, quantile, interpolation, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
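# Unlike the element-wise rolling_* registrations above, rolling_aggregate may change the
# column set (e.g. one output column per requested aggregation), so it uses a full-axis
# apply rather than FoldFunction. The pandas-level behaviour it mirrors (assumed window of 3):
#     df.rolling(3).aggregate(["sum", "mean"])   # MultiIndex columns per input column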
def unstack(self, level, fill_value):
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindex = True
else:
axis = 0
new_columns = None
need_reindex = False
def map_func(df):
return pandas.DataFrame(df.unstack(level=level, fill_value=fill_value))
def is_tree_like_or_1d(calc_index, valid_index):
if not isinstance(calc_index, pandas.MultiIndex):
return True
actual_len = 1
for lvl in calc_index.levels:
actual_len *= len(lvl)
return len(self.index) * len(self.columns) == actual_len * len(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_all_multi_list = False
if (
isinstance(self.index, pandas.MultiIndex)
and isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_all_multi_list = True
real_cols_bkp = self.columns
obj = self.copy()
obj.columns = np.arange(len(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._apply_full_axis(
axis, map_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def get_unique_level_values(index):
return [
index.get_level_values(lvl).unique()
for lvl in np.arange(index.nlevels)
]
new_index = (
get_unique_level_values(index)
if consider_index
else index
if isinstance(index, list)
else [index]
)
new_columns = (
get_unique_level_values(columns) if consider_columns else [columns]
)
return pandas.MultiIndex.from_product([*new_columns, *new_index])
if is_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sort_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindex:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = isinstance(self.index, pandas.MultiIndex)
is_recompute_columns = not is_recompute_index and isinstance(
self.columns, pandas.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if isinstance(self.columns, pandas.MultiIndex) or not isinstance(
self.index, pandas.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and isinstance(self.index, pandas.MultiIndex)
else self.index
)
index = pandas.MultiIndex.from_tuples(
list(index) * len(self.columns)
)
columns = self.columns.repeat(len(self.index))
index_levels = [
index.get_level_values(i) for i in range(index.nlevels)
]
new_index = pandas.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindex(0, new_index)
return result
def stack(self, level, dropna):
if not isinstance(self.columns, pandas.MultiIndex) or (
isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.columns.nlevels
):
new_columns = ["__reduced__"]
else:
new_columns = None
new_modin_frame = self._modin_frame._apply_full_axis(
1,
lambda df: pandas.DataFrame(df.stack(level=level, dropna=dropna)),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that apply a function to every partition.
abs = MapFunction.register(pandas.DataFrame.abs, dtypes="copy")
applymap = MapFunction.register(pandas.DataFrame.applymap)
conj = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.conj(df))
)
invert = MapFunction.register(pandas.DataFrame.__invert__)
isin = MapFunction.register(pandas.DataFrame.isin, dtypes=np.bool)
isna = MapFunction.register(pandas.DataFrame.isna, dtypes=np.bool)
negative = MapFunction.register(pandas.DataFrame.__neg__)
notna = MapFunction.register(pandas.DataFrame.notna, dtypes=np.bool)
round = MapFunction.register(pandas.DataFrame.round)
replace = MapFunction.register(pandas.DataFrame.replace)
series_view = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
df.squeeze(axis=1).view(*args, **kwargs)
)
)
to_numeric = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
pandas.to_numeric(df.squeeze(axis=1), *args, **kwargs)
)
)
def repeat(self, repeats):
def map_fn(df):
return pandas.DataFrame(df.squeeze(axis=1).repeat(repeats))
if isinstance(repeats, int) or (is_list_like(repeats) and len(repeats) == 1):
return MapFunction.register(map_fn, validate_index=True)(self)
else:
return self.__constructor__(self._modin_frame._apply_full_axis(0, map_fn))
# END Map partitions operations
# String map partitions operations
str_capitalize = MapFunction.register(_str_map("capitalize"), dtypes="copy")
str_center = MapFunction.register(_str_map("center"), dtypes="copy")
str_contains = MapFunction.register(_str_map("contains"), dtypes=np.bool)
str_count = MapFunction.register(_str_map("count"), dtypes=int)
str_endswith = MapFunction.register(_str_map("endswith"), dtypes=np.bool)
str_find = MapFunction.register(_str_map("find"), dtypes="copy")
str_findall = MapFunction.register(_str_map("findall"), dtypes="copy")
str_get = MapFunction.register(_str_map("get"), dtypes="copy")
str_index = MapFunction.register(_str_map("index"), dtypes="copy")
str_isalnum = MapFunction.register(_str_map("isalnum"), dtypes=np.bool)
str_isalpha = MapFunction.register(_str_map("isalpha"), dtypes=np.bool)
str_isdecimal = MapFunction.register(_str_map("isdecimal"), dtypes=np.bool)
str_isdigit = MapFunction.register(_str_map("isdigit"), dtypes=np.bool)
str_islower = MapFunction.register(_str_map("islower"), dtypes=np.bool)
str_isnumeric = MapFunction.register(_str_map("isnumeric"), dtypes=np.bool)
str_isspace = MapFunction.register(_str_map("isspace"), dtypes=np.bool)
str_istitle = MapFunction.register(_str_map("istitle"), dtypes=np.bool)
str_isupper = MapFunction.register(_str_map("isupper"), dtypes=np.bool)
str_join = MapFunction.register(_str_map("join"), dtypes="copy")
str_len = MapFunction.register(_str_map("len"), dtypes=int)
str_ljust = MapFunction.register(_str_map("ljust"), dtypes="copy")
str_lower = MapFunction.register(_str_map("lower"), dtypes="copy")
str_lstrip = MapFunction.register(_str_map("lstrip"), dtypes="copy")
str_match = MapFunction.register(_str_map("match"), dtypes="copy")
str_normalize = MapFunction.register(_str_map("normalize"), dtypes="copy")
str_pad = MapFunction.register(_str_map("pad"), dtypes="copy")
str_partition = MapFunction.register(_str_map("partition"), dtypes="copy")
str_repeat = MapFunction.register(_str_map("repeat"), dtypes="copy")
str_replace = MapFunction.register(_str_map("replace"), dtypes="copy")
str_rfind = MapFunction.register(_str_map("rfind"), dtypes="copy")
str_rindex = MapFunction.register(_str_map("rindex"), dtypes="copy")
str_rjust = MapFunction.register(_str_map("rjust"), dtypes="copy")
str_rpartition = MapFunction.register(_str_map("rpartition"), dtypes="copy")
str_rsplit = MapFunction.register(_str_map("rsplit"), dtypes="copy")
str_rstrip = MapFunction.register(_str_map("rstrip"), dtypes="copy")
str_slice = MapFunction.register(_str_map("slice"), dtypes="copy")
str_slice_replace = MapFunction.register(_str_map("slice_replace"), dtypes="copy")
str_split = MapFunction.register(_str_map("split"), dtypes="copy")
str_startswith = MapFunction.register(_str_map("startswith"), dtypes=np.bool)
str_strip = MapFunction.register(_str_map("strip"), dtypes="copy")
str_swapcase = MapFunction.register(_str_map("swapcase"), dtypes="copy")
str_title = MapFunction.register(_str_map("title"), dtypes="copy")
str_translate = MapFunction.register(_str_map("translate"), dtypes="copy")
str_upper = MapFunction.register(_str_map("upper"), dtypes="copy")
str_wrap = MapFunction.register(_str_map("wrap"), dtypes="copy")
str_zfill = MapFunction.register(_str_map("zfill"), dtypes="copy")
# END String map partitions operations
def unique(self):
"""Return unique values of Series object.
Returns
-------
ndarray
The unique values returned as a NumPy array.
"""
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda x: x.squeeze(axis=1).unique(),
new_columns=self.columns,
)
return self.__constructor__(new_modin_frame)
def searchsorted(self, **kwargs):
"""
Return a QueryCompiler holding the indices at which the given value(s) should be
inserted to maintain the sort order of the passed Series.
Returns
-------
PandasQueryCompiler
"""
def map_func(part, *args, **kwargs):
elements_number = len(part.index)
assert elements_number > 0, "Wrong mapping behaviour of MapReduce"
# unify value type
value = kwargs.pop("value")
value = np.array([value]) if is_scalar(value) else value
if elements_number == 1:
part = part[part.columns[0]]
else:
part = part.squeeze()
part_index_start = part.index.start
part_index_stop = part.index.stop
result = part.searchsorted(value=value, *args, **kwargs)
processed_results = {}
value_number = 0
for value_result in result:
value_result += part_index_start
if value_result > part_index_start and value_result < part_index_stop:
processed_results[f"value{value_number}"] = {
"relative_location": "current_partition",
"index": value_result,
}
elif value_result <= part_index_start:
processed_results[f"value{value_number}"] = {
"relative_location": "previoius_partitions",
"index": part_index_start,
}
else:
processed_results[f"value{value_number}"] = {
"relative_location": "next_partitions",
"index": part_index_stop,
}
value_number += 1
return pandas.DataFrame(processed_results)
def reduce_func(map_results, *args, **kwargs):
def get_value_index(value_result):
value_result_grouped = value_result.groupby(level=0)
rel_location = value_result_grouped.get_group("relative_location")
ind = value_result_grouped.get_group("index")
# executes if result is inside of the mapped part
if "current_partition" in rel_location.values:
assert (
rel_location[rel_location == "current_partition"].count() == 1
), "Each value should have single result"
return ind[rel_location.values == "current_partition"]
# executes if result is between mapped parts
elif rel_location.nunique(dropna=False) > 1:
return ind[rel_location.values == "previoius_partitions"][0]
# executes if result is outside of the mapped part
else:
if "next_partitions" in rel_location.values:
return ind[-1]
else:
return ind[0]
map_results_parsed = map_results.apply(
lambda ser: get_value_index(ser)
).squeeze()
if isinstance(map_results_parsed, pandas.Series):
map_results_parsed = map_results_parsed.to_list()
return pandas.Series(map_results_parsed)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
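# The map step records, for each searched value, whether its insertion point falls inside,
# before, or after the local partition (using the partition's RangeIndex start/stop), and
# the reduce step then picks the single globally consistent position. Tiny illustration of
# the bookkeeping in plain pandas (assumptions only):
#     part = pandas.Series([10, 20, 30], index=pandas.RangeIndex(5, 8))
#     part.searchsorted(25) + part.index.start   # 7, strictly inside (5, 8) -> "current_partition"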
# Dt map partitions operations
dt_date = MapFunction.register(_dt_prop_map("date"))
dt_time = MapFunction.register(_dt_prop_map("time"))
dt_timetz = MapFunction.register(_dt_prop_map("timetz"))
dt_year = MapFunction.register(_dt_prop_map("year"))
dt_month = MapFunction.register(_dt_prop_map("month"))
dt_day = MapFunction.register(_dt_prop_map("day"))
dt_hour = MapFunction.register(_dt_prop_map("hour"))
dt_minute = MapFunction.register(_dt_prop_map("minute"))
dt_second = MapFunction.register(_dt_prop_map("second"))
dt_microsecond = MapFunction.register(_dt_prop_map("microsecond"))
dt_nanosecond = MapFunction.register(_dt_prop_map("nanosecond"))
dt_week = MapFunction.register(_dt_prop_map("week"))
dt_weekofyear = MapFunction.register(_dt_prop_map("weekofyear"))
dt_dayofweek = MapFunction.register(_dt_prop_map("dayofweek"))
dt_weekday = MapFunction.register(_dt_prop_map("weekday"))
dt_dayofyear = MapFunction.register(_dt_prop_map("dayofyear"))
dt_quarter = MapFunction.register(_dt_prop_map("quarter"))
dt_is_month_start = MapFunction.register(_dt_prop_map("is_month_start"))
dt_is_month_end = MapFunction.register(_dt_prop_map("is_month_end"))
dt_is_quarter_start = MapFunction.register(_dt_prop_map("is_quarter_start"))
dt_is_quarter_end = MapFunction.register(_dt_prop_map("is_quarter_end"))
dt_is_year_start = MapFunction.register(_dt_prop_map("is_year_start"))
dt_is_year_end = MapFunction.register(_dt_prop_map("is_year_end"))
dt_is_leap_year = MapFunction.register(_dt_prop_map("is_leap_year"))
dt_daysinmonth = MapFunction.register(_dt_prop_map("daysinmonth"))
dt_days_in_month = MapFunction.register(_dt_prop_map("days_in_month"))
dt_tz = MapReduceFunction.register(
_dt_prop_map("tz"), lambda df: | pandas.DataFrame(df.iloc[0]) | pandas.DataFrame |
"""
The data_cleaner module is used to clean missing or NaN values from pandas dataframes (e.g. removing NaN, imputation, etc.)
"""
import pandas as pd
import numpy as np
import logging
from sklearn.preprocessing import Imputer
import os
from scipy.linalg import orth
log = logging.getLogger('mastml')
def flag_outliers(df, conf_not_input_features, savepath, n_stdevs=3):
"""
Method that scans each X feature matrix column and flags values that lie more than n_stdevs standard deviations
from that column's mean. The indices and values of potentially problematic points are collected per column and
written to an output Excel file.
Args:
df: (dataframe), pandas dataframe containing data
conf_not_input_features: (list), names of columns that are not input features (e.g. target or grouping columns) and are skipped
savepath: (str), directory where the output file 'data_potential_outliers.xlsx' is written
n_stdevs: (int), number of standard deviations from the column mean used as the outlier threshold (default 3)
Returns:
None, just writes results to file
"""
n_rows = df.shape[0]
outlier_dict = dict()
for col in df.columns:
outlier_rows = list()
outlier_vals = list()
if col not in conf_not_input_features:
avg = np.average(df[col])
stdev = np.std(df[col])
for row in range(n_rows):
if df[col].iloc[row] > avg + n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
elif df[col].iloc[row] < avg - n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
else:
pass
outlier_dict[col] = (outlier_rows, outlier_vals)
pd.DataFrame.from_dict(data=outlier_dict, orient='index', columns=['Indices', 'Values']).to_excel(os.path.join(savepath, 'data_potential_outliers.xlsx'))
return
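# Hypothetical call, assuming a MAST-ML style configuration where target and grouping
# columns are excluded from the scan:
#     flag_outliers(df, conf_not_input_features=['target'], savepath='results/', n_stdevs=3)
# The resulting workbook lists, per feature column, the row indices and values falling
# outside the +/- n_stdevs band around that column's mean.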
def remove(df, axis):
"""
Method that removes a full column or row of data values if one column or row contains NaN or is blank
Args:
df: (dataframe), pandas dataframe containing data
axis: (int), whether to remove rows (axis=0) or columns (axis=1)
Returns:
df: (dataframe): dataframe with NaN or missing values removed
"""
nan_indices = df.index[df.isnull().any(axis=1)]
df = df.dropna(axis=axis, how='any')
return df, nan_indices
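# Sketch of the two cleaning modes (illustrative inputs only):
#     cleaned_rows, nan_rows = remove(df, axis=0)   # drop any row containing NaN
#     cleaned_cols, nan_rows = remove(df, axis=1)   # drop any column containing NaN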
def imputation(df, strategy, cols_to_leave_out=None):
"""
Method that imputes values to the missing places based on the median, mean, etc. of the data in the column
Args:
df: (dataframe), pandas dataframe containing data
strategy: (str), method of imputation, e.g. median, mean, etc.
cols_to_leave_out: (list), list of column indices to not include in imputation
Returns:
df: (dataframe): dataframe with NaN or missing values resolved via imputation
"""
col_names = df.columns.tolist()
if cols_to_leave_out is None:
df_imputed = pd.DataFrame(Imputer(missing_values='NaN', strategy=strategy, axis=0).fit_transform(df))
else:
df_include = df.drop(cols_to_leave_out, axis=1)
df_hold_out = df.drop([c for c in df.columns if c not in cols_to_leave_out], axis=1)
df_imputed = pd.DataFrame(Imputer(missing_values='NaN', strategy=strategy, axis=0).fit_transform(df_include), columns=df_include.columns)
# Need to join the imputed dataframe with the columns containing strings that were held out
if cols_to_leave_out is None:
df = df_imputed
else:
df = | pd.concat([df_hold_out, df_imputed], axis=1) | pandas.concat |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameIsIn:
def test_isin(self):
# GH#4211
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
other = ["a", "b", "c"]
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# GH#16991
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
d = {"A": ["a"]}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
df.columns = ["A", "A"]
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH#4763
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
with pytest.raises(TypeError):
df.isin("a")
with pytest.raises(TypeError):
df.isin("aaa")
def test_isin_df(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected["A"].loc[[1, 3]] = True
expected["B"].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ["A", "C"]
result = df1.isin(df2)
expected["B"] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH#16394
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
df["C"] = list(zip(df["A"], df["B"]))
result = df["C"].isin([(1, "a")])
tm.assert_series_equal(result, Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame(
[[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=["A", "B"],
index=[0, 0, 1, 1],
)
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ["B", "B"]
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
df = | DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"]) | pandas.DataFrame |
from datetime import time
import numpy as np
import pytest
from pandas import DataFrame, date_range
import pandas._testing as tm
class TestBetweenTime:
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH#20725
df = DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
# GH#8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = | DataFrame(rand_data, index=rng, columns=rng) | pandas.DataFrame |
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash
import plotly.graph_objects as go
import plotly.figure_factory as ff
from dash.dependencies import Input, Output
import calendar
import datetime
from datetime import datetime
import pandas as pd
import numpy as np
import copy
from app import app
y_test = pd.read_csv("./Data/y_test.csv", index_col=0)
test_time = pd.read_csv("./Data/test_time.csv", index_col=0)
y_pred = pd.read_csv('./Data/test_results_decoded.csv', index_col=0).loc[:, 'test_pred_xgb']
df = test_time.join(y_pred)
df['DISCHTIME'] = | pd.to_datetime(df.DISCHTIME) | pandas.to_datetime |
import argparse
import logging
import multiprocessing as mp
import os
import pickle
import re
import sys
import warnings
from datetime import datetime
from itertools import product
import pandas as pd
import tabulate
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from greenguard import get_pipelines
from greenguard.demo import load_demo
from greenguard.loaders import CSVLoader
from greenguard.metrics import (METRICS, accuracy_score, f1_score,
fpr_score, tpr_score, threshold_score)
from greenguard.pipeline import GreenGuardPipeline, generate_init_params, generate_preprocessing
from greenguard.results import load_results, write_results
LOGGER = logging.getLogger(__name__)
DEFAULT_TUNING_METRIC_KWARGS = {'threshold': 0.5}
LEADERBOARD_COLUMNS = [
'problem_name',
'window_size',
'resample_rule',
'template',
'default_test',
'default_cv',
'tuned_cv',
'tuned_test',
'tuning_metric',
'tuning_metric_kwargs',
'fit_predict_time',
'default_cv_time',
'average_cv_time',
'total_time',
'status',
]
def _scorer(metric, metric_args):
if isinstance(metric, str):
metric, cost = METRICS[metric]
def f(expected, observed):
try:
return metric(expected, observed, **metric_args)
except TypeError:
if 'threshold' not in metric_args:
raise
kwargs = metric_args.copy()
threshold = kwargs.pop('threshold')
observed = observed >= threshold
return metric(expected, observed, **kwargs)
return f
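# _scorer normalizes the tuning metric: string names are resolved through METRICS, and
# metrics that need hard class labels (raising TypeError on probabilities) get the
# predictions binarized with the supplied threshold before retrying. Hedged example,
# assuming 'f1' is a key of METRICS that expects class labels:
#     score = _scorer('f1', {'threshold': 0.5})(y_true, y_proba)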
def _build_init_params(template, window_size, rule, template_params):
if 'dfs' in template:
window_size_rule_params = {
'pandas.DataFrame.resample#1': {
'rule': rule,
},
'featuretools.dfs.json#1': {
'training_window': window_size,
}
}
elif 'lstm' in template:
window_size_rule_params = {
'pandas.DataFrame.resample#1': {
'rule': rule,
},
'mlprimitives.custom.timeseries_preprocessing.cutoff_window_sequences#1': {
'window_size': window_size,
}
}
for primitive, params in window_size_rule_params.items():
primitive_params = template_params.setdefault(primitive, {})
primitive_params.update(params)
return template_params
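# For a hypothetical ('30d', '12h') window/rule pair and a dfs-based template, the merged
# init params end up shaped roughly like:
#     {'pandas.DataFrame.resample#1': {'rule': '12h'},
#      'featuretools.dfs.json#1': {'training_window': '30d'}}
# i.e. the pair is injected into primitive-level hyperparameters via setdefault/update,
# without discarding any user-supplied template_params.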
def evaluate_template(
template,
target_times,
readings,
tuning_iterations=50,
init_params=None,
preprocessing=0,
metrics=None,
threshold=None,
tpr=None,
tuning_metric='roc_auc_score',
tuning_metric_kwargs=DEFAULT_TUNING_METRIC_KWARGS,
cost=False,
cv_splits=3,
test_size=0.25,
random_state=0,
cache_path=None,
scores={}
):
"""Returns the scores for a given template.
Args:
template (str):
Given template to evaluate.
target_times (DataFrame):
Contains the specification of the problem that we are solving, which has three columns:
* turbine_id: Unique identifier of the turbine which this label corresponds to.
* cutoff_time: Time associated with this target.
* target: The value that we want to predict. This can either be a numerical value
or a categorical label. This column can also be skipped when preparing
data that will be used only to make predictions and not to fit any
pipeline.
readings (DataFrame):
Contains the signal data from different sensors, with the following columns:
* turbine_id: Unique identifier of the turbine which this reading comes from.
* signal_id: Unique identifier of the signal which this reading comes from.
* timestamp (datetime): Time where the reading took place, as a datetime.
* value (float): Numeric value of this reading.
metric (function or str):
Metric to use. If a ``str`` is given it must be one of the metrics
defined in the ``greenguard.metrics.METRICS`` dictionary.
tuning_iterations (int):
Number of iterations to be used.
preprocessing (int, list or dict):
Number of preprocessing steps to be used.
init_params (list):
Initialization parameters for the pipeline.
cost (bool):
Whether the metric is a cost function (the lower the better) or not.
test_size (float):
Percentage of the data set to be used for the test.
cv_splits (int):
Amount of splits to create.
random_state (int):
Random state used for the train/test split.
cache_path (str):
If given, cache the generated cross validation splits in this folder.
Defaults to ``None``.
Returns:
scores (dict):
Stores the four types of scores that are being evaluate.
"""
start_time = datetime.utcnow()
scores['tuning_metric'] = str(tuning_metric)
scores['tuning_metric_kwargs'] = tuning_metric_kwargs
tuning_metric = _scorer(tuning_metric, tuning_metric_kwargs)
train, test = train_test_split(target_times, test_size=test_size, random_state=random_state)
pipeline = GreenGuardPipeline(
template,
metric=tuning_metric,
cost=cost,
cv_splits=cv_splits,
init_params=init_params,
preprocessing=preprocessing,
cache_path=cache_path
)
# Computing the default test score
fit_predict_time = datetime.utcnow()
pipeline.fit(train, readings)
predictions = pipeline.predict(test, readings)
fit_predict_time = datetime.utcnow() - fit_predict_time
scores['default_test'] = tuning_metric(test['target'], predictions)
# Computing the default cross validation score
default_cv_time = datetime.utcnow()
session = pipeline.tune(train, readings)
session.run(1)
default_cv_time = datetime.utcnow() - default_cv_time
scores['default_cv'] = pipeline.cv_score
# Computing the cross validation score with tuned hyperparameters
average_cv_time = datetime.utcnow()
session.run(tuning_iterations)
average_cv_time = (datetime.utcnow() - average_cv_time) / tuning_iterations
scores['tuned_cv'] = pipeline.cv_score
# Computing the test score with tuned hyperparameters
pipeline.fit(train, readings)
predictions = pipeline.predict(test, readings)
ground_truth = test['target']
# compute different metrics
if tpr:
tpr = tpr if isinstance(tpr, list) else [tpr]
for value in tpr:
threshold = threshold_score(ground_truth, predictions, value)
scores[f'fpr_tpr/{value}'] = fpr_score(ground_truth, predictions, tpr=value)
predictions_classes = predictions >= threshold
scores[f'accuracy_tpr/{value}'] = accuracy_score(ground_truth, predictions_classes)
scores[f'f1_tpr/{value}'] = f1_score(ground_truth, predictions_classes)
scores[f'threshold_tpr/{value}'] = threshold_score(ground_truth, predictions, value)
if f'accuracy_tpr/{value}' not in LEADERBOARD_COLUMNS:
LEADERBOARD_COLUMNS.extend([
f'accuracy_tpr/{value}',
f'f1_tpr/{value}',
f'fpr_tpr/{value}',
f'threshold_tpr/{value}',
])
else:
threshold = 0.5 if threshold is None else threshold
threshold = threshold if isinstance(threshold, list) else [threshold]
for value in threshold:
scores[f'fpr_threshold/{value}'] = fpr_score(
ground_truth, predictions, threshold=value)
predictions_classes = predictions >= value
scores[f'accuracy_threshold/{value}'] = accuracy_score(
ground_truth, predictions_classes)
scores[f'f1_threshold/{value}'] = f1_score(ground_truth, predictions_classes)
scores[f'tpr_threshold/{value}'] = tpr_score(ground_truth, predictions, value)
if f'accuracy_threshold/{value}' not in LEADERBOARD_COLUMNS:
LEADERBOARD_COLUMNS.extend([
f'accuracy_threshold/{value}',
f'f1_threshold/{value}',
f'fpr_threshold/{value}',
f'tpr_threshold/{value}',
])
scores['tuned_test'] = tuning_metric(test['target'], predictions)
scores['fit_predict_time'] = fit_predict_time
scores['default_cv_time'] = default_cv_time
scores['average_cv_time'] = average_cv_time
scores['total_time'] = datetime.utcnow() - start_time
return scores
def evaluate_templates(
templates,
window_size_rule,
tuning_iterations=50,
init_params=None,
preprocessing=0,
metrics=None,
threshold=None,
tpr=None,
tuning_metric='roc_auc_score',
tuning_metric_kwargs=DEFAULT_TUNING_METRIC_KWARGS,
target_times=None,
readings=None,
cost=False,
test_size=0.25,
cv_splits=3,
random_state=0,
cache_path=None,
cache_results=None,
problem_name=None,
output_path=None,
progress_bar=None,
multiprocess=False
):
"""Execute the benchmark process and optionally store the result as a ``CSV``.
Args:
templates (list):
List of templates to try.
window_size_rule (list):
List of tuples (int, str or Timedelta object).
metric (function or str):
Metric to use. If a ``str`` is given it must be one of the metrics
defined in the ``greenguard.metrics.METRICS`` dictionary.
tuning_iterations (int):
Number of iterations to be used.
init_params (dict):
Initialization parameters for the pipelines.
target_times (DataFrame):
Contains the specification of the problem that we are solving, which has three columns:
* turbine_id: Unique identifier of the turbine which this label corresponds to.
* cutoff_time: Time associated with this target.
* target: The value that we want to predict. This can either be a numerical value
or a categorical label. This column can also be skipped when preparing
data that will be used only to make predictions and not to fit any
pipeline.
readings (DataFrame):
Contains the signal data from different sensors, with the following columns:
* turbine_id: Unique identifier of the turbine which this reading comes from.
* signal_id: Unique identifier of the signal which this reading comes from.
* timestamp (datetime): Time where the reading took place, as a datetime.
* value (float): Numeric value of this reading.
preprocessing (int, list or dict):
Number of preprocessing steps to be used.
cost (bool):
Whether the metric is a cost function (the lower the better) or not.
test_size (float):
Percentage of the data set to be used for the test.
cv_splits (int):
Amount of splits to create.
random_state (int):
Random state used for the train/test split.
output_path (str):
Path where to save the benchmark report.
cache_path (str):
If given, cache the generated cross validation splits in this folder.
Defaults to ``None``.
Returns:
pandas.DataFrame or None:
If ``output_path`` is ``None`` it will return a ``pandas.DataFrame`` object,
else it will dump the results in the specified ``output_path``.
Example:
>>> from sklearn.metrics import f1_score
>>> templates = [
... 'normalize_dfs_xgb_classifier',
... 'unstack_lstm_timeseries_classifier'
... ]
>>> window_size_rule = [
... ('30d','12h'),
... ('7d','4h')
... ]
>>> preprocessing = [0, 1]
>>> scores_df = evaluate_templates(
... templates=templates,
... window_size_rule=window_size_rule,
... metric=f1_score,
... tuning_iterations=5,
... preprocessing=preprocessing,
... cost=False,
... test_size=0.25,
... cv_splits=3,
... random_state=0
... )
>>> scores_df
template window_size resample_rule default_test default_cv tuned_cv tuned_test status
0 unstack_lstm_timeseries_classifier 30d 12h 0.720000 0.593634 0.627883 0.775510 OK
1 unstack_lstm_timeseries_classifier 7d 4h 0.723404 0.597440 0.610766 0.745098 OK
2 normalize_dfs_xgb_classifier 30d 12h 0.581818 0.619698 0.637123 0.596491 OK
3 normalize_dfs_xgb_classifier 7d 4h 0.581818 0.619698 0.650367 0.603774 OK
""" # noqa
if readings is None and target_times is None:
target_times, readings = load_demo()
init_params = generate_init_params(templates, init_params)
preprocessing = generate_preprocessing(templates, preprocessing)
scores_list = []
for template, window_rule in product(templates, window_size_rule):
window_size, rule = window_rule
try:
LOGGER.info('Evaluating template %s on problem %s (%s, %s)',
template, problem_name, window_size, rule)
template_params = init_params[template]
template_params = _build_init_params(template, window_size, rule, template_params)
template_preprocessing = preprocessing[template]
if multiprocess:
manager = mp.Manager()
scores = manager.dict()
process = mp.Process(
target=evaluate_template,
args=(
template,
target_times,
readings,
tuning_iterations,
init_params,
preprocessing,
metrics,
threshold,
tpr,
tuning_metric,
tuning_metric_kwargs,
cost,
cv_splits,
test_size,
random_state,
cache_path,
scores
)
)
process.start()
process.join()
if 'tuned_test' not in scores:
scores['status'] = 'ERRORED'
scores = dict(scores) # parse the managed dict to dict for pandas.
else:
scores = dict()
scores['problem_name'] = problem_name
scores['template'] = template
scores['window_size'] = window_size
scores['resample_rule'] = rule
result = evaluate_template(
template=template,
target_times=target_times,
readings=readings,
metrics=metrics,
tuning_metric=tuning_metric,
tuning_metric_kwargs=tuning_metric_kwargs,
threshold=threshold,
tpr=tpr,
tuning_iterations=tuning_iterations,
preprocessing=template_preprocessing,
init_params=template_params,
cost=cost,
test_size=test_size,
cv_splits=cv_splits,
random_state=random_state,
cache_path=cache_path
)
scores.update(result)
scores['status'] = 'OK'
except Exception:
scores['status'] = 'ERRORED'
LOGGER.exception('Could not score template %s ', template)
if cache_results:
os.makedirs(cache_results, exist_ok=True)
template_name = template
if os.path.isfile(template_name):
template_name = os.path.basename(template_name).replace('.json', '')
file_name = '{}_{}_{}_{}.csv'.format(problem_name, template_name, window_size, rule)
df = pd.DataFrame([scores]).reindex(LEADERBOARD_COLUMNS, axis=1)
df.to_csv(os.path.join(cache_results, file_name), index=False)
scores_list.append(scores)
if progress_bar:
progress_bar.update(1)
results = pd.DataFrame.from_records(scores_list)
results = results.reindex(LEADERBOARD_COLUMNS, axis=1)
if output_path:
LOGGER.info('Saving benchmark report to %s', output_path)
results.to_csv(output_path)
else:
return results
def _generate_target_times_readings(target_times, readings_path, window_size, rule, signals):
"""
Returns:
pandas.DataFrame:
Table of readings for the target times, including the columns ``turbine_id``,
``signal_id``, ``timestamp`` and ``value``.
"""
csv_loader = CSVLoader(
readings_path,
rule=rule,
)
return csv_loader.load(target_times, window_size=window_size, signals=signals)
def make_problems(target_times_paths, readings_path, window_size_resample_rule,
output_path=None, signals=None):
"""Make problems with the target times and readings for each window size and resample rule.
Create problems in the format accepted by ``run_benchmark``, as pickle files containing:
* ``target_times``: ``pandas.DataFrame`` containing the target times.
* ``readings``: ``pandas.DataFrame`` containing the readings for the target times.
* ``window_size``: window size value used.
* ``resample_rule``: resample rule value used.
Or return a ``dict`` containing as keys the names of the problems generated and tuples with
the previously specified fields of target times, readings, window size and resample rule.
Args:
target_times_paths (list):
List of paths to CSVs that contain target times.
readings_path (str):
Path to the folder where readings in raw CSV format can be found.
window_size_resample_rule (list):
List of tuples (int, str or Timedelta object).
output_path (str):
Path to save the generated problems.
signals (str or list):
List of signal names or csv file that has a `signal_id` column to use as the signal
names list.
"""
if isinstance(target_times_paths, str):
target_times_paths = [target_times_paths]
if isinstance(target_times_paths, list):
target_times_paths = {
os.path.basename(path).replace('.csv', ''): path
for path in target_times_paths
}
if output_path:
generated_problems = list()
else:
generated_problems = {}
if isinstance(signals, str) and os.path.exists(signals):
signals = pd.read_csv(signals).signal_id
for problem_name, target_time_path in tqdm(target_times_paths.items()):
for window_size, rule in window_size_resample_rule:
target_times = pd.read_csv(target_time_path, parse_dates=['cutoff_time'])
new_target_times, readings = _generate_target_times_readings(
target_times,
readings_path,
window_size,
rule,
signals=signals,
)
pickle_name = '{}_{}_{}'.format(problem_name, window_size, rule)
if output_path:
os.makedirs(output_path, exist_ok=True)
output_pickle_path = os.path.join(output_path, pickle_name + '.pkl')
with open(output_pickle_path, 'wb') as pickle_file:
pickle.dump((new_target_times, readings, window_size, rule), pickle_file)
generated_problems.append(output_pickle_path)
else:
generated_problems[pickle_name] = (new_target_times, readings, window_size, rule)
return generated_problems
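# A minimal call sketch for ``make_problems``; the CSV, readings folder and output folder
# below are hypothetical placeholder paths, not files that ship with this module. It is kept
# commented out so importing the module is unaffected.
#
# problems = make_problems(
#     target_times_paths=['csvs/pump_failures.csv'],
#     readings_path='readings/',
#     window_size_resample_rule=[('30d', '12h'), ('7d', '4h')],
#     output_path='problems/',
# )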
def run_benchmark(templates, problems, window_size_resample_rule=None,
tuning_iterations=50, signals=None, preprocessing=0, init_params=None,
metrics=None, threshold=None, tpr=None, tuning_metric='roc_auc_score',
tuning_metric_kwargs=DEFAULT_TUNING_METRIC_KWARGS, cost=False, cv_splits=5,
test_size=0.33, random_state=0, cache_path=None, cache_results=None,
output_path=None, multiprocess=False):
"""Execute the benchmark function and optionally store the result as a ``CSV``.
This function provides a user-friendly interface to interact with the ``evaluate_templates``
function. It allows the user to specify an ``output_path`` where the results can be
stored. If this path is not provided, a ``pandas.DataFrame`` will be returned.
This function evaluates each template against each problem for each window size and resample
rule possible, and will tune each template for the given amount of tuning iterations.
The problems can be a pickle file that contains the following values:
* ``target_times``: ``pandas.DataFrame`` containing the target times.
* ``readings``: ``pandas.DataFrame`` containing the readings for the target times.
* ``window_size``: window size value used.
* ``resample_rule``: resample rule value used.
Or it can be a dictionary with the problem's name as keys and as values either a path to a pickle
file or a tuple containing the previously specified fields.
Args:
templates (str or list):
Name of the json pipelines that will be evaluated against the problems.
problems (str, list or dict):
There are three possible values for problems:
* ``str``: Path to a given problem stored as a pickle file (pkl).
* ``list``: List of paths to given problems stored as pickle files (pkl).
* ``dict``: A dict containing as keys the name of the problem and as value the
path to a pickle file or a tuple with target times and readings data
frames and the window size and resample rule used to generate this
problem.
The pickle files have to contain a tuple with target times and readings data frames and
the window size and resample rule used to generate that problem. We recommend using
the function ``make_problems`` to generate those files.
window_size_resample_rule (list):
List of tuples (int, str or Timedelta object).
tuning_iterations (int):
Number of tuning iterations to perform over each template.
signals (str or list):
Path to a csv file containing ``signal_id`` column that we would like to use or a
``list`` of signals that we would like to use. If ``None`` use all the signals from
the readings.
preprocessing (int, dict or list):
There are three possible values for preprocessing:
* ``int``: the value will be used for all templates.
* ``dict`` with the template name as a key and a number as a value, will
be used for that template.
* ``list``: each value will be assigned to the corresponding position of
self.templates.
Defaults to ``0``.
init_params (dict or list):
There are three possible values for init_params:
* Init params ``dict``: It will be used for all templates.
* ``dict`` with the name of the template as a key and dictionary with its
init params.
* ``list``: each value will be assigned to the corresponding position of
self.templates.
Defaults to ``None``.
metric (function or str):
Metric to use. If a ``str`` is given, it must be one of the metrics
defined in the ``greenguard.metrics.METRICS`` dictionary.
cost (bool):
Whether the metric is a cost function (the lower the better) or not.
Defaults to ``False``.
cv_splits (int):
Number of cross validation folds to use. Defaults to ``5``.
test_size (float):
Fraction of the data that will be saved for test, represented as a value between 0 and 1.
random_state (int or RandomState):
random state to use for the cross validation partitioning. Defaults to ``0``.
cache_path (str):
If given, cache the generated cross validation splits in this folder.
Defaults to ``None``.
cache_results (str):
If provided, store the progress of each pipeline and each problem while running.
output_path (str):
If provided, store the results to the given filename. Defaults to ``None``.
"""
templates = templates if isinstance(templates, (list, tuple)) else [templates]
results = list()
if isinstance(problems, str):
problems = [problems]
if isinstance(problems, list):
problems = {
os.path.basename(problem).replace('.pkl', ''): problem
for problem in problems
}
if signals is not None:
if isinstance(signals, str) and os.path.exists(signals):
signals = pd.read_csv(signals).signal_id
total_runs = len(templates) * len(problems) * len(window_size_resample_rule or [1])
pbar = tqdm(total=total_runs)
for problem_name, problem in problems.items():
# remove window_size resample_rule nomenclature from the problem's name
problem_name = re.sub(r'\_\d+[DdHhMmSs]', r'', problem_name)
if isinstance(problem, str):
with open(problem, 'rb') as pickle_file:
target_times, readings, orig_window_size, orig_rule = pickle.load(pickle_file)
else:
target_times, readings, orig_window_size, orig_rule = problem
if signals is not None:
readings = readings[readings.signal_id.isin(signals)]
wsrr = window_size_resample_rule or [(orig_window_size, orig_rule)]
orig_window_size = pd.to_timedelta(orig_window_size)
orig_rule = | pd.to_timedelta(orig_rule) | pandas.to_timedelta |
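# A minimal sketch of how ``run_benchmark`` is typically invoked, assuming the pickle below
# was produced beforehand by ``make_problems``; the template name comes from the docstring
# example above, while the paths are placeholders.
results = run_benchmark(
    templates=['normalize_dfs_xgb_classifier'],
    problems='problems/pump_failures_30d_12h.pkl',
    tuning_iterations=10,
    output_path='benchmark_results.csv',
)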
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
| tm.assert_index_equal(rng, expected) | pandas.util.testing.assert_index_equal |
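# A self-contained sketch of the behaviour the tests above assert: adding a timedelta-like
# object to a DatetimeIndex shifts every element, and the in-place forms (+=, -=) give the
# same result.
import pandas as pd

rng = pd.date_range('2000-01-01 09:00', freq='H', periods=3)
shifted = rng + pd.Timedelta(hours=2)
# shifted is identical to pd.date_range('2000-01-01 11:00', freq='H', periods=3)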
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 28 09:19:10 2022
@author: BM109X32G-10GPU-02
"""
import torch
import pandas as pd
import train
import predict
test = train.train('../dataset/world_wide.txt')
f =pd.read_table('../dataset/zinc15.txt')
#predict = predict.predict('../dataset/world_wide.txt',property=True)
torch.cuda.empty_cache()
for i in range (307):
file = f.iloc[1000*i:1000*i+1000,0]
file=pd.DataFrame(file)
file.to_csv('../dataset/zinc15/'+str(i)+'.txt',index=None)
for i in range (307):
predict = predict.predict('../dataset/zinc15/'+str(i)+'.txt',property=False)
res= | pd.DataFrame(predict) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.cm as cm
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import matplotlib.transforms as transforms
import matplotlib.colors as colors
import netCDF4 as nc
from netCDF4 import Dataset
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
#########################################################################
## ---------------- READING OF THE THRESHOLD FILES --------------------##
#########################################################################
df_UmbralH_Nube_348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/UmbralH_Nube_348.csv', sep=',', index_col =0)
df_UmbralH_Nube_350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/UmbralH_Nube_350.csv', sep=',', index_col =0)
df_UmbralH_Nube_975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Umbrales_Horarios/UmbralH_Nube_975.csv', sep=',', index_col =0)
df_UmbralH_Nube = pd.concat([df_UmbralH_Nube_348, df_UmbralH_Nube_350, df_UmbralH_Nube_975], axis=1, sort=False)
df_UmbralH_Nube = df_UmbralH_Nube.mean(axis = 1, skipna = True)
df_UmbralH_Nube = | pd.DataFrame(df_UmbralH_Nube, columns=['Umbral']) | pandas.DataFrame |
# coding : utf-8
# created by cjr
import pandas as pd
def trip_id_count(train, test):
"""
Number of trips for each user
:param train:
:param test:
:return:
"""
train_data = train.groupby(["TERMINALNO"])["TRIP_ID"].max()
train_df = | pd.DataFrame(train_data) | pandas.DataFrame |
import sys
import time
import pandas as pd
import numpy as np
from datetime import datetime
def func5(gc, cursor):
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/1mOa_ipZ8xyzvpDcd3QoyRsows')
nexp = wb.worksheet('Downloads')
print('\nConnected to Google Sheets: Adobe Data / Downloads.')
print('Running query...')
sql = """
SELECT
dpd.data as Data,
sum(dpd.downloads) as Downloads
from func5.dbo.Programas_Downloads dpd
LEFT JOIN func5.dbo.Programas dp ON dpd.codprog = dp.codprog
WHERE dp.TemGerenciador = 1 AND dpd.data >= '2021-01-01'
GROUP BY dpd.data
"""
start_time = time.time()
rows = cursor.execute(sql)
elapsed_time = time.time() - start_time
print(f'Query time: {elapsed_time}s')
df = | pd.DataFrame.from_records(rows, columns=[col[0] for col in cursor.description]) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
# list of the data files:
days = ["monday.csv","tuesday.csv","wednesday.csv","thursday.csv","friday.csv"]
# creating an empty dataframe for listing all the customer walks:
customer_walks = | pd.DataFrame(columns=["timestamp","customer_no","location","next_location"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
crime = | pd.read_csv("https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv") | pandas.read_csv |
from os import listdir
from os.path import isfile, join, abspath
import pandas as pd
import sys
import facial_validation_processor as fvp
import warnings
warnings.filterwarnings("ignore")
def import_file(dataset_path):
#Check format
if not dataset_path.endswith(('xlsx', 'xls', 'csv', 'dta')):
return (False, 'Supported files are .csv, .dta, .xlsx, .xls')
# try:
if dataset_path.endswith(('xlsx', 'xls')):
dataset = pd.read_excel(dataset_path)
elif dataset_path.endswith('csv'):
dataset = pd.read_csv(dataset_path)
elif dataset_path.endswith('dta'):
try:
dataset = pd.read_stata(dataset_path)
except ValueError:
dataset = pd.read_stata(dataset_path, convert_categoricals=False)
label_dict = | pd.io.stata.StataReader(dataset_path) | pandas.io.stata.StataReader |
from torch.utils.data import DataLoader, Dataset
import cv2
import os
from utils import make_mask,mask2enc,make_mask_
import numpy as np
import pandas as pd
from albumentations import (HorizontalFlip, Normalize, Compose, Resize, RandomRotate90, Flip, RandomCrop, PadIfNeeded)
from albumentations.pytorch import ToTensor
from sklearn.model_selection import train_test_split,GroupKFold,KFold,GroupShuffleSplit
path = './input/'
RANDOM_STATE = 2019
class SteelDataset(Dataset):
def __init__(self, df, data_folder, mean, std, phase):
self.df = df
self.root = data_folder
self.mean = mean
self.std = std
self.phase = phase
self.transforms = get_transforms(phase, mean, std)
self.fnames = self.df.index.tolist()
def __getitem__(self, idx):
image_id, mask = make_mask(idx, self.df)
image_path = os.path.join(self.root, "train_images", image_id)
# img = Image.open(image_path)
# img = np.array(img)[:, :, 0]
img = cv2.imread(image_path)[:, :, 0]
img = img[:, :, np.newaxis]
augmented = self.transforms(image=img, mask=mask)
img = augmented['image']
mask = augmented['mask'] # 1x256x1600x4
mask = mask[0].permute(2, 0, 1) # 1x4x256x1600
return img, mask
def __len__(self):
return len(self.fnames)
class SteelDatasetCopped(Dataset):
def __init__(self, df, data_folder, mean= (0.41009), std= (0.16991), phase='train'):
self.df = df
self.root = data_folder
self.mean = (0.3959)
self.std = (0.1729)
self.phase = phase
self.transforms = get_transforms(phase, mean, std)
self.fnames = self.df.index.tolist()
def __getitem__(self, idx):
image_id, mask = make_mask_(idx, self.df)
# print(image_id)
image_path = os.path.join(self.root, "images", image_id)
try:
img = cv2.imread(image_path)[:, :, 0]
except Exception:
image_path = os.path.join(self.root, "images_n", image_id)
img = cv2.imread(image_path)[:, :, 0]
img = img[:, :, np.newaxis]
augmented = self.transforms(image=img, mask=mask)
img = augmented['image']
mask = augmented['mask'] # 1x256x1600x4
mask = mask[0].permute(2, 0, 1) # 1x4x256x1600
return img, mask
def __len__(self):
return len(self.fnames)
def get_transforms(phase, mean, std):
list_transforms = []
if phase == "train":
list_transforms.extend(
[
# PadIfNeeded(min_height=256, min_width=256),
# RandomCrop(height=256, width=256, p=1),
# RandomCrop(height=224, width=224, p=1),
HorizontalFlip(p=0.5), # only horizontal flip as of now
Flip(p=0.5),
# RandomRotate90(p=0.5),
# PadIfNeeded(min_height=256, min_width=256)
]
)
else:
list_transforms.extend(
[
PadIfNeeded(min_height=256, min_width=256),
]
)
list_transforms.extend(
[
Normalize(mean=mean, std=std, p=1),
ToTensor(),
]
)
list_trfms = Compose(list_transforms)
return list_trfms
def provider(
data_folder,
df_path,
phase,
mean=None,
std=None,
batch_size=4,
num_workers=4,
cropped=False
):
'''Returns dataloader for the model training'''
if cropped ==False:
df = pd.read_csv(df_path)
# some preprocessing
# https://www.kaggle.com/amanooo/defect-detection-starter-u-net
df['ImageId'], df['ClassId'] = zip(*df['ImageId_ClassId'].str.split('_'))
df['ClassId'] = df['ClassId'].astype(int)
df = df.pivot(index='ImageId', columns='ClassId', values='EncodedPixels')
df['defects'] = df.count(axis=1)
train_df, val_df = train_test_split(df, test_size=0.2, stratify=df["defects"], random_state=RANDOM_STATE)
df = train_df if phase == "train" else val_df
image_dataset = SteelDataset(df, data_folder, mean, std, phase)
dataloader = DataLoader(
image_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
shuffle=True,
)
else:
if os.path.exists('./other_thing/cropped_df.csv'):
df_ = pd.read_csv('./other_thing/cropped_df.csv')
df_ = df_.fillna('')
else:
print('Prepare rle ing')
df = pd.DataFrame()
df['ImageId'] = os.listdir('./other_thing/images')
df['Image'] = df['ImageId'].apply(lambda x: x.split('.')[0][:-2])
predictions = []
for imgid in os.listdir('./other_thing/images'):
mask = cv2.imread('./other_thing/masks/'+imgid)
rles = mask2enc(mask)
predictions.append(rles)
img_neg = pd.read_csv('./input/pred.csv')
img_neg = img_neg['fname'].unique()[:15000]
df2 = pd.DataFrame()
df2['ImageId'] = img_neg
df2['Image'] = df2['ImageId'].apply(lambda x: x.split('.')[0][:-2])
predictions2 = [['', '', '', '']]*15000
df_ = | pd.DataFrame(predictions2+predictions, columns=[1, 2, 3, 4]) | pandas.DataFrame |
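# A minimal usage sketch for ``provider`` above; the folder and CSV paths are placeholders
# (the non-cropped branch expects a Severstal-style train.csv with ImageId_ClassId and
# EncodedPixels columns), and the single-channel mean/std are the values used elsewhere in
# this file.
train_loader = provider(
    data_folder='./input/',
    df_path='./input/train.csv',
    phase='train',
    mean=(0.3959,),
    std=(0.1729,),
    batch_size=8,
    num_workers=4,
)
images, masks = next(iter(train_loader))  # batch of 1-channel images and 4-channel defect masks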
""" Normalizing flow architecture class definitions for param distributions. """
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
import seaborn as sns
import os
import pickle
import time
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import tensorshape_util
tfb = tfp.bijectors
tfd = tfp.distributions
from epi.batch_norm import BatchNormalization
import tensorflow.compat.v1 as tf1
import pandas as pd
from epi.error_formatters import format_type_err_msg
from epi.util import (
gaussian_backward_mapping,
np_column_vec,
get_hash,
set_dir_index,
array_str,
dbg_check,
)
DTYPE = tf.float32
EPS = 1e-6
class NormalizingFlow(tf.keras.Model):
"""Normalizing flow network for approximating parameter distributions.
The normalizing flow is constructed via stage(s) of either coupling or
autoregressive transforms of :math:`q_0`. Coupling transforms are real NVP bijectors
where each conditional distribution has the same number of neural network
layers and units. One stage is one coupling (second half of elements are
conditioned on the first half (see :obj:`tfp.bijectors.RealNVP`)). Similarly,
autoregressive transforms are masked autoregressive flow (MAF) bijectors. One stage
is one full autoregressive factorization (see :obj:`tfp.bijectors.MAF`).
After each stage, which is succeeded by another coupling or autoregressive
transform, the dimensions are permuted via a Glow permutation.
This facilitates randomized conditioning (real NVP) and
factorization orderings (MAF) at each stage.
E.g. :obj:`arch_type='autoregressive', num_stages=2`
:math:`q_0` -> MAF -> permute -> MAF -> ...
We parameterize the final processing stages of the normalizing flow (a deep
generative model) via post_affine and bounds.
To facilitate scaling and shifting of the normalizing flow up to this point,
one can set post_affine to True.
E.g. :obj:`arch_type='autoregressive', num_stages=2, post_affine=True`
:math:`q_0` -> MAF -> permute -> batch norm -> MAF -> affine -> ...
By setting bounds to a tuple (lower_bound, upper_bound), the final step
in the normalizing flow maps to the support of the distribution using an
:obj:`epi.normalizing_flows.IntervalFlow`.
E.g. :obj:`arch_type='autoregressive', num_stages=2, post_affine=True, bounds=(lb,ub)`
:math:`q_0` -> MAF -> permute -> batch norm -> MAF -> post affine -> interval flow
The base distribution :math:`q_0` is chosen to be a standard isotropic gaussian.
Transforms of coupling and autoregressive layers can be parameterized as
an 'affine' function or a 'spline' via parameter elemwise_fn.
:param arch_type: :math:`\\in` `['autoregressive', 'coupling']`
:type arch_type: str
:param D: Dimensionality of the normalizing flow.
:type D: int
:param num_stages: Number of coupling or autoregressive stages.
:type num_stages: int
:param num_layers: Number of neural network layers per conditional.
:type num_layers: int
:param num_units: Number of units per layer.
:type num_units: int
:type elemwise_fn: str, optional
:param elemwise_fn: Inter-stage bijector `\\in` :obj:`['affine', 'spline']`, defaults to 'affine'.
:param num_bins: Number of bins when elemwise_fn is spline.
:type num_bins: int, optional
:param batch_norm: Use batch normalization between stages, defaults to True.
:type batch_norm: bool, optional
:param bn_momentum: Batch normalization momentum parameter, defaults to 0.99.
:type bn_momentum: float, optional
:param post_affine: Shift and scale following main transform.
:type post_affine: bool, optional
:param bounds: Bounds of distribution support, defaults to None.
:type bounds: (np.ndarray, np.ndarray), optional
:param random_seed: Random seed of architecture parameters, defaults to 1.
:type random_seed: int, optional
"""
def __init__(
self,
arch_type,
D,
num_stages,
num_layers,
num_units,
elemwise_fn="affine",
num_bins=4,
batch_norm=True,
bn_momentum=0.0,
post_affine=True,
bounds=None,
random_seed=1,
):
"""Constructor method."""
super(NormalizingFlow, self).__init__()
self._set_arch_type(arch_type)
self._set_D(D)
self._set_num_stages(num_stages)
self._set_num_layers(num_layers)
self._set_num_units(num_units)
self._set_elemwise_fn(elemwise_fn)
if self.arch_type == "autoregressive" and self.elemwise_fn == "spline":
raise NotImplementedError(
"Error: MAF flows with splines are not implemented yet."
)
self._set_num_bins(num_bins)
self._set_batch_norm(batch_norm)
if not self.batch_norm:
self.bn_momentum = None
self._set_post_affine(post_affine)
self._set_bounds(bounds)
self._set_random_seed(random_seed)
self.stages = []
# self.shift_and_log_scale_fns = []
self.bijector_fns = []
self.permutations = []
if self.batch_norm:
self.batch_norms = []
self.q0 = tfd.MultivariateNormalDiag(loc=self.D * [0.0])
bijectors = []
np.random.seed(self.random_seed)
for i in range(num_stages):
if arch_type == "coupling":
num_masked = self.D // 2
if elemwise_fn == "affine":
bijector_fn = tfb.real_nvp_default_template(
hidden_layers=num_layers * [num_units]
)
stage = tfb.RealNVP(
num_masked=num_masked, shift_and_log_scale_fn=bijector_fn
)
self.bijector_fns.append(bijector_fn)
elif elemwise_fn == "spline":
bijector_fn = SplineParams(
D - num_masked, num_layers, num_units, self.num_bins, B=4.0
)
stage = tfb.RealNVP(num_masked=num_masked, bijector_fn=bijector_fn)
# make parameters visible to autograd
self.bijector_fns.append(bijector_fn._bin_widths)
self.bijector_fns.append(bijector_fn._bin_heights)
self.bijector_fns.append(bijector_fn._knot_slopes)
elif arch_type == "autoregressive":
# Splines are not implemented for MAF networks yet!
bijector_fn = tfb.AutoregressiveNetwork(
params=2, hidden_units=num_layers * [num_units]
)
stage = tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=bijector_fn)
self.bijector_fns.append(bijector_fn)
self.stages.append(stage)
bijectors.append(stage)
# self.shift_and_log_scale_fns.append(shift_and_log_scale_fn)
self.bijector_fns.append(bijector_fn)
if i < self.num_stages - 1:
lower_upper, perm = trainable_lu_factorization(self.D)
perm_i = tfp.bijectors.ScaleMatvecLU(
lower_upper, perm, validate_args=False, name=None
)
self.permutations.append(perm_i)
bijectors.append(perm_i)
if self.batch_norm:
bn = tf.keras.layers.BatchNormalization(momentum=bn_momentum)
batch_norm_i = BatchNormalization(batchnorm_layer=bn)
self.batch_norms.append(batch_norm_i)
bijectors.append(batch_norm_i)
if self.post_affine:
self.a = tf.Variable(initial_value=tf.ones((D,)), name="a")
self.b = tf.Variable(initial_value=tf.zeros((D,)), name="b")
self.scale = tfb.Scale(scale=self.a)
self.shift = tfb.Shift(shift=self.b)
self.PA = tfb.Chain([self.shift, self.scale])
bijectors.append(self.PA)
# TODO Make this "or" ?
if self.lb is not None and self.ub is not None:
self.support_mapping = IntervalFlow(self.lb, self.ub)
bijectors.append(self.support_mapping)
bijectors.reverse()
self.trans_dist = tfd.TransformedDistribution(
distribution=self.q0, bijector=tfb.Chain(bijectors)
)
if self.batch_norm:
self._set_bn_momentum(bn_momentum)
def __call__(self, N):
x = self.q0.sample(N)
log_q0 = self.q0.log_prob(x)
sum_ldj = 0.0
for i in range(self.num_stages):
stage_i = self.stages[i]
sum_ldj += stage_i.forward_log_det_jacobian(x, event_ndims=1)
x = stage_i(x)
if i < self.num_stages - 1:
permutation_i = self.permutations[i]
x = permutation_i(x)
sum_ldj += permutation_i.forward_log_det_jacobian(x, event_ndims=1)
if self.batch_norm:
batch_norm_i = self.batch_norms[i]
sum_ldj += batch_norm_i.forward_log_det_jacobian(x, event_ndims=1)
x = batch_norm_i(x)
if self.post_affine:
sum_ldj += self.PA.forward_log_det_jacobian(x, event_ndims=1)
x = self.PA(x)
if self.lb is not None and self.ub is not None:
x, _ldj = self.support_mapping.forward_and_log_det_jacobian(x)
sum_ldj += _ldj
log_q_x = log_q0 - sum_ldj
return x, log_q_x
# @tf.function
def sample(self, N):
"""Generate N samples from the network.
:param N: Number of samples.
:type N: int
:return: N samples and log determinant of the jacobians.
:rtype: (tf.Tensor, tf.Tensor)
"""
return self.__call__(N)[0]
def _set_arch_type(self, arch_type): # Make this noninherited?
arch_types = ["coupling", "autoregressive"]
if type(arch_type) is not str:
raise TypeError(format_type_err_msg(self, "arch_type", arch_type, str))
if arch_type not in arch_types:
raise ValueError(
'NormalizingFlow arch_type must be "coupling" or "autoregressive"'
)
self.arch_type = arch_type
def _set_D(self, D):
if type(D) is not int:
raise TypeError(format_type_err_msg(self, "D", D, int))
elif D < 2:
raise ValueError("NormalizingFlow D %d must be greater than 0." % D)
self.D = D
def _set_num_stages(self, num_stages):
if type(num_stages) is not int:
raise TypeError(format_type_err_msg(self, "num_stages", num_stages, int))
elif num_stages < 0:
raise ValueError(
"NormalizingFlow num_stages %d must be greater than 0." % num_stages
)
self.num_stages = num_stages
def _set_num_layers(self, num_layers):
if type(num_layers) is not int:
raise TypeError(format_type_err_msg(self, "num_layers", num_layers, int))
elif num_layers < 1:
raise ValueError(
"NormalizingFlow num_layers arg %d must be greater than 0." % num_layers
)
self.num_layers = num_layers
def _set_num_units(self, num_units):
if type(num_units) is not int:
raise TypeError(format_type_err_msg(self, "num_units", num_units, int))
elif num_units < 1:
raise ValueError(
"NormalizingFlow num_units %d must be greater than 0." % num_units
)
self.num_units = num_units
def _set_elemwise_fn(self, elemwise_fn):
elemwise_fns = ["affine", "spline"]
if type(elemwise_fn) is not str:
raise TypeError(format_type_err_msg(self, "elemwise_fn", elemwise_fn, str))
if elemwise_fn not in elemwise_fns:
raise ValueError('NormalizingFlow elemwise_fn must be "affine" or "spline"')
self.elemwise_fn = elemwise_fn
def _set_num_bins(self, num_bins):
if type(num_bins) is not int:
raise TypeError(format_type_err_msg(self, "num_bins", num_bins, int))
elif num_bins < 2:
raise ValueError(
"NormalizingFlow spline num_bins %d must be greater than 1." % num_units
)
self.num_bins = num_bins
def _set_batch_norm(self, batch_norm):
if type(batch_norm) is not bool:
raise TypeError(format_type_err_msg(self, "batch_norm", batch_norm, bool))
self.batch_norm = batch_norm
def _set_bn_momentum(self, bn_momentum):
if type(bn_momentum) is not float:
raise TypeError(
format_type_err_msg(self, "bn_momentum", bn_momentum, float)
)
self.bn_momentum = bn_momentum
bijectors = self.trans_dist.bijector.bijectors
for bijector in bijectors:
if type(bijector).__name__ == "BatchNormalization":
bijector.batchnorm.momentum = bn_momentum
return None
def _reset_bn_movings(self,):
bijectors = self.trans_dist.bijector.bijectors
for bijector in bijectors:
if type(bijector).__name__ == "BatchNormalization":
bijector.batchnorm.moving_mean.assign(np.zeros((self.D,)))
bijector.batchnorm.moving_variance.assign(np.ones((self.D,)))
return None
def _set_post_affine(self, post_affine):
if type(post_affine) is not bool:
raise TypeError(format_type_err_msg(self, "post_affine", post_affine, bool))
self.post_affine = post_affine
def _set_bounds(self, bounds):
if bounds is not None:
_type = type(bounds)
if _type in [list, tuple]:
len_bounds = len(bounds)
if _type is list:
bounds = tuple(bounds)
else:
raise TypeError(
"NormalizingFlow argument bounds must be tuple or list not %s."
% _type.__name__
)
if len_bounds != 2:
raise ValueError("NormalizingFlow bounds arg must be length 2.")
for i, bound in enumerate(bounds):
if type(bound) is not np.ndarray:
raise TypeError(
format_type_err_msg(self, "bounds[%d]" % i, bound, np.ndarray)
)
self.lb, self.ub = bounds[0], bounds[1]
else:
self.lb, self.ub = None, None
def _set_random_seed(self, random_seed):
if type(random_seed) is not int:
raise TypeError(format_type_err_msg(self, "random_seed", random_seed, int))
self.random_seed = random_seed
def get_init_path(self, mu, Sigma):
init_hash = get_hash([mu, Sigma, self.lb, self.ub])
init_path = "./data/inits/%s/%s/" % (init_hash, self.to_string())
if not os.path.exists(init_path):
os.makedirs(init_path)
init_index = {"mu": mu, "Sigma": Sigma, "lb": self.lb, "ub": self.ub}
init_index_file = "./data/inits/%s/init.pkl" % init_hash
arch_index = {
"arch_type": self.arch_type,
"D": self.D,
"num_stages": self.num_stages,
"num_layers": self.num_layers,
"num_units": self.num_units,
"elemwise_fn": self.elemwise_fn,
"num_bins": self.num_bins,
"batch_norm": self.batch_norm,
"bn_momentum": self.bn_momentum,
"post_affine": self.post_affine,
"lb": self.lb,
"ub": self.ub,
"random_seed": self.random_seed,
}
arch_index_file = "./data/inits/%s/%s/arch.pkl" % (init_hash, self.to_string())
set_dir_index(init_index, init_index_file)
set_dir_index(arch_index, arch_index_file)
return init_path
def initialize(
self,
mu,
Sigma,
N=500,
num_iters=int(1e4),
lr=1e-3,
log_rate=100,
load_if_cached=True,
save=True,
verbose=False,
):
"""Initializes architecture to gaussian distribution via variational inference.
:math:`\\underset{q_\\theta \\in Q}{\\mathrm{arg max}} H(q_\\theta) + \\eta^\\top \\mathbb{E}_{z \\sim q_\\theta}[T(z)]`
where :math:`\\eta` and :math:`T(z)` for a multivariate gaussian are:
:math:`\\eta = \\begin{bmatrix} \\Sigma^{-1}\\mu \\\\ \\mathrm{vec} \\left( -\\frac{1}{2}\\Sigma^{-1} \\right) \\end{bmatrix}`
:math:`T(z) = \\begin{bmatrix} z \\\\ \\mathrm{vec} \\left( zz^\\top \\right) \\end{bmatrix}`
Parameter `init_type` may be:
:obj:`'iso_gauss'` with parameters
* :obj:`init_params.loc` set to scalar mean of each variable.
* :obj:`init_params.scale` set to scale of each variable.
:obj:`'gaussian'` with parameters
* :obj:`init_params.mu` set to the mean.
* :obj:`init_params.Sigma` set to the covariance.
:param init_type: :math:`\\in` `['iso_gauss', 'gaussian']`
:type init_type: str
:param init_params: Parameters according to :obj:`init_type`.
:type init_params: dict
:param N: Number of batch samples per iteration.
:type N: int
:param num_iters: Number of optimization iterations, defaults to 10000.
:type num_iters: int, optional
:param lr: Adam optimizer learning rate, defaults to 1e-3.
:type lr: float, optional
:param log_rate: Record optimization data every so iterations, defaults to 100.
:type log_rate: int, optional
:param load_if_cached: If initialization has been optimized before, load it, defaults to True.
:type load_if_cached: bool, optional
:param save: Save initialization if true, defaults to True.
:type save: bool, optional
:param verbose: Print verbose output, defaults to False.
:type verbose: bool, optional
"""
optimizer = tf.keras.optimizers.Adam(lr)
init_path = self.get_init_path(mu, Sigma)
init_file = init_path + "ckpt"
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=self)
ckpt = tf.train.latest_checkpoint(init_path)
if load_if_cached and (ckpt is not None):
print("Loading variables from cached initialization.")
status = checkpoint.restore(ckpt)
status.expect_partial() # Won't use optimizer momentum parameters
opt_data_file = init_path + "opt_data.csv"
if os.path.exists(opt_data_file):
return pd.read_csv(opt_data_file)
t1 = time.time()
eta = gaussian_backward_mapping(mu, Sigma)
def gauss_init_loss(z, log_q_z, eta):
zl = z[:, :, tf.newaxis]
zr = z[:, tf.newaxis, :]
zzT = tf.matmul(zl, zr)
zzT_vec = tf.reshape(zzT, (N, self.D ** 2))
T_z = tf.concat((z, zzT_vec), axis=1)
E_T_z = tf.reduce_mean(T_z, axis=0)
E_log_q_z = tf.reduce_mean(log_q_z)
loss = E_log_q_z - tf.reduce_sum(eta * E_T_z)
return loss
@tf.function
def train_step():
with tf.GradientTape() as tape:
z, log_q_z = self(N)
loss = gauss_init_loss(z, log_q_z, eta)
params = self.trainable_variables
gradients = tape.gradient(loss, params)
ming, maxg = -1e5, 1e5
gradients = [tf.clip_by_value(grad, ming, maxg) for grad in gradients]
optimizer.apply_gradients(zip(gradients, params))
return loss
z, log_q_z = self(N)
loss0 = gauss_init_loss(z, log_q_z, eta).numpy()
H0 = -np.mean(log_q_z.numpy())
KL0 = self.gauss_KL(z, log_q_z, mu, Sigma)
d = {"iteration": 0, "loss": loss0, "H": H0, "KL": KL0}
opt_it_dfs = [pd.DataFrame(d, index=[0])]
for i in range(1, num_iters + 1):
start_time = time.time()
loss = train_step()
ts_time = time.time() - start_time
if np.isnan(loss):
raise ValueError("Initialization loss is nan.")
if not np.isfinite(loss):
raise ValueError("Initialization loss is inf.")
if i % log_rate == 0:
z, log_q_z = self(N)
loss = gauss_init_loss(z, log_q_z, eta).numpy()
H = -np.mean(log_q_z.numpy())
KL = self.gauss_KL(z, log_q_z, mu, Sigma)
d = {"iteration": i, "loss": loss, "H": H, "KL": KL}
opt_it_dfs.append(pd.DataFrame(d, index=[0]))
if verbose:
if not np.isnan(KL):
print(
i,
"H",
H,
"KL",
KL,
"loss",
loss,
"%.2E s" % ts_time,
flush=True,
)
else:
print(i, "H", H, "loss", loss, "%.2E s" % ts_time, flush=True)
init_time = time.time() - t1
opt_df = | pd.concat(opt_it_dfs, ignore_index=True) | pandas.concat |
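# A minimal usage sketch for the NormalizingFlow class defined above; the import path is an
# assumption about the package layout, and the dimensions and Gaussian target are placeholders.
import numpy as np
from epi.normalizing_flows import NormalizingFlow  # assumed module path

nf = NormalizingFlow(
    arch_type='coupling',
    D=4,
    num_stages=2,
    num_layers=2,
    num_units=32,
)
z, log_q_z = nf(1000)  # samples from q_theta and their log densities
nf.initialize(mu=np.zeros(4), Sigma=np.eye(4), num_iters=2000, verbose=True)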
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to process FBI Hate Crime data."""
import os
import sys
import json
import pandas as pd
import numpy as np
import copy
from utils import flatten_by_column, make_time_place_aggregation
# Allows the following module imports to work when running as a script
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_SCRIPT_PATH, '../../../util/'))
from statvar_dcid_generator import get_statvar_dcid, _PREPEND_APPEND_REPLACE_MAP
_CACHE_DIR = os.path.join(_SCRIPT_PATH, 'cache')
# Columns to input from source data
_INPUT_COLUMNS = [
'INCIDENT_ID', 'DATA_YEAR', 'OFFENDER_RACE', 'OFFENDER_ETHNICITY',
'STATE_ABBR', 'OFFENSE_NAME', 'BIAS_DESC', 'AGENCY_TYPE_NAME',
'MULTIPLE_OFFENSE', 'MULTIPLE_BIAS', 'PUB_AGENCY_NAME',
'TOTAL_OFFENDER_COUNT', 'ADULT_OFFENDER_COUNT', 'JUVENILE_OFFENDER_COUNT',
'INCIDENT_DATE', 'VICTIM_TYPES', 'LOCATION_NAME', 'VICTIM_COUNT',
'ADULT_VICTIM_COUNT', 'JUVENILE_VICTIM_COUNT'
]
# A dict to map bias descriptions to their bias category
_BIAS_CATEGORY_MAP = {
'Anti-Black or African American':
'race',
'Anti-White':
'race',
'Anti-Native Hawaiian or Other Pacific Islander':
'race',
'Anti-Arab':
'race',
'Anti-Asian':
'race',
'Anti-American Indian or Alaska Native':
'race',
'Anti-Other Race/Ethnicity/Ancestry':
'race',
'Anti-Multiple Races, Group':
'race',
'Anti-Protestant':
'religion',
'Anti-Other Religion':
'religion',
'Anti-Jewish':
'religion',
'Anti-Islamic (Muslim)':
'religion',
'Anti-Jehovah\'s Witness':
'religion',
'Anti-Mormon':
'religion',
'Anti-Buddhist':
'religion',
'Anti-Sikh':
'religion',
'Anti-Other Christian':
'religion',
'Anti-Hindu':
'religion',
'Anti-Catholic':
'religion',
'Anti-Eastern Orthodox (Russian, Greek, Other)':
'religion',
'Anti-Atheism/Agnosticism':
'religion',
'Anti-Multiple Religions, Group':
'religion',
'Anti-Heterosexual':
'sexualOrientation',
'Anti-Lesbian (Female)':
'sexualOrientation',
'Anti-Lesbian, Gay, Bisexual, or Transgender (Mixed Group)':
'sexualOrientation',
'Anti-Bisexual':
'sexualOrientation',
'Anti-Gay (Male)':
'sexualOrientation',
'Anti-Hispanic or Latino':
'ethnicity',
'Anti-Physical Disability':
'disabilityStatus',
'Anti-Mental Disability':
'disabilityStatus',
'Anti-Male':
'gender',
'Anti-Female':
'gender',
'Anti-Transgender':
'TransgenderOrGenderNonConforming',
'Anti-Gender Non-Conforming':
'TransgenderOrGenderNonConforming',
'Unknown (offender\'s motivation not known)':
'UnknownBias'
}
# A dict to map offenses to categories of crime
_OFFENSE_CATEGORY_MAP = {
"Intimidation": "CrimeAgainstPerson",
"Simple Assault": "CrimeAgainstPerson",
"Aggravated Assault": "CrimeAgainstPerson",
"Robbery": "CrimeAgainstProperty",
"Destruction/Damage/Vandalism of Property": "CrimeAgainstProperty",
"Arson": "CrimeAgainstProperty",
"Murder and Nonnegligent Manslaughter": "CrimeAgainstPerson",
"Burglary/Breaking & Entering": "CrimeAgainstProperty",
"Rape": "CrimeAgainstPerson",
"Motor Vehicle Theft": "CrimeAgainstProperty",
"Drug/Narcotic Violations": "CrimeAgainstSociety",
"Weapon Law Violations": "CrimeAgainstSociety",
"Theft From Motor Vehicle": "CrimeAgainstProperty",
"Shoplifting": "CrimeAgainstProperty",
"All Other Larceny": "CrimeAgainstProperty",
"Theft of Motor Vehicle Parts or Accessories": "CrimeAgainstProperty",
"Fondling": "CrimeAgainstPerson",
"Counterfeiting/Forgery": "CrimeAgainstProperty",
"Kidnapping/Abduction": "CrimeAgainstPerson",
"Theft From Building": "CrimeAgainstProperty",
"Pornography/Obscene Material": "CrimeAgainstSociety",
"Embezzlement": "CrimeAgainstProperty",
"Purse-snatching": "CrimeAgainstProperty",
"Drug Equipment Violations": "CrimeAgainstSociety",
"Credit Card/Automated Teller Machine Fraud": "CrimeAgainstProperty",
"Sexual Assault With An Object": "CrimeAgainstPerson",
"False Pretenses/Swindle/Confidence Game": "CrimeAgainstProperty",
"Pocket-picking": "CrimeAgainstProperty",
"Welfare Fraud": "CrimeAgainstProperty",
"Extortion/Blackmail": "CrimeAgainstProperty",
"Stolen Property Offenses": "CrimeAgainstProperty",
"Incest": "CrimeAgainstPerson",
"Sodomy": "CrimeAgainstPerson",
"Negligent Manslaughter": "CrimeAgainstPerson",
"Statutory Rape": "CrimeAgainstPerson",
"Theft From Coin-Operated Machine or Device": "CrimeAgainstProperty",
"Impersonation": "CrimeAgainstProperty",
"Prostitution": "CrimeAgainstSociety",
"Wire Fraud": "CrimeAgainstProperty",
"Assisting or Promoting Prostitution": "CrimeAgainstSociety",
"Purchasing Prostitution": "CrimeAgainstSociety",
"Bribery": "CrimeAgainstProperty",
"Identity Theft": "CrimeAgainstProperty",
"Human Trafficking, Commercial Sex Acts": "CrimeAgainstPerson",
"Hacking/Computer Invasion": "CrimeAgainstProperty",
"Betting/Wagering": "CrimeAgainstSociety",
"Animal Cruelty": "CrimeAgainstSociety",
"Not Specified": "Not Specified"
}
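# Illustrative note (an assumption about downstream use, kept as comments): the
# BIAS_CATEGORY and OFFENSE_CATEGORY columns grouped on in _AGGREGATIONS below can be
# derived from the raw description columns via the two maps above, e.g.
#
# incident_df['BIAS_CATEGORY'] = incident_df['BIAS_DESC'].map(_BIAS_CATEGORY_MAP)
# offense_df['OFFENSE_CATEGORY'] = offense_df['OFFENSE_NAME'].map(_OFFENSE_CATEGORY_MAP)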
# A dict to generate aggregations on the source data
_AGGREGATIONS = {
'incidents.csv': [{ # Total Criminal Incidents
'df': 'incident_df',
'args': {
'groupby_cols': [],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}],
'incidents_bias.csv': [
{ # Incidents grouped by bias motivation (anti-white, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents grouped into single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents grouped by bias category (race,religion, gender, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}
],
'incidents_offense.csv': [
{ # Incidents by crime type (arson, robbery, ...)
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by crime category
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}
],
'incidents_offense_bias.csv': [
{ # Incidents by crime type and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents grouped by crime type and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents grouped by crime type and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by crime type and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents grouped by crime type and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents grouped by crime type and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}
],
'incidents_offenderrace.csv': [{ # Total incidents by offender race
'df': 'incident_df',
'args': {
'groupby_cols': ['OFFENDER_RACE'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
}],
'incidents_offenderethnicity.csv':
[{ # Total incidents by offender ethnicity
'df': 'incident_df',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
}],
'incidents_offenderrace_bias.csv': [
{ # Incidents by offender race and bias motivation
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['OFFENDER_RACE', 'BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by offender race and single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['OFFENDER_RACE', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by offender race and bias category
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['OFFENDER_RACE', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
}
],
'incidents_offenderethnicity_bias.csv': [
{ # Incidents by offender ethnicity and bias motivation
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY', 'BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by offender ethnicity and single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by offender ethnicity and bias category
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents'
}
}
],
'offenses.csv': [{ # Total Criminal Offenses
'df': 'offense_df',
'args': {
'groupby_cols': [],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}],
'victims.csv': [{ # Total Victims
'df': 'incident_df',
'args': {
'groupby_cols': [],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
}],
'offenders.csv': [
{ # Total Offenders
'df': 'incident_df',
'args': {
'groupby_cols': [],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Total known and unknown Offenders
'df': 'incident_df',
'args': {
'groupby_cols': ['OFFENDER_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
}
],
'incidents_victimtype_bias.csv': [
{ # Incidents by victim type
'df': 'victim_df',
'args': {
'groupby_cols': ['VICTIM_TYPES'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by victim type and bias motivation
'df': 'single_bias_victim',
'args': {
'groupby_cols': ['VICTIM_TYPES', 'BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by victim type and single bias / multiple bias
'df': 'victim_df',
'args': {
'groupby_cols': ['VICTIM_TYPES', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by victim type and bias category
'df': 'single_bias_victim',
'args': {
'groupby_cols': ['VICTIM_TYPES', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}
],
'incidents_location_bias.csv': [
{ # Incidents by location of crime
'df': 'location_df',
'args': {
'groupby_cols': ['LOCATION_NAME'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by location of crime and single bias / multiple bias
'df': 'location_df',
'args': {
'groupby_cols': ['LOCATION_NAME', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by location of crime and bias category
'df': 'single_bias_location',
'args': {
'groupby_cols': ['LOCATION_NAME', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}
],
'incidents_multiple_location_bias.csv': [
{ # Incidents by location of crime
'df': 'location_df',
'args': {
'groupby_cols': ['MULTIPLE_LOCATION_NAME'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by location of crime and single bias / multiple bias
'df': 'location_df',
'args': {
'groupby_cols': ['MULTIPLE_LOCATION_NAME', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
},
{ # Incidents by location of crime and bias category
'df': 'single_bias_location',
'args': {
'groupby_cols': ['MULTIPLE_LOCATION_NAME', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'nunique'
},
'population_type': 'CriminalIncidents'
}
}
],
'offense_bias.csv': [
{ # Offenses grouped by bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
],
'offense_offensetype.csv': [
{ # Offenses grouped by offense type
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by offense category
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offense_victimtype.csv': [{ # Offenses grouped by offense type
'df': 'offense_victim_df',
'args': {
'groupby_cols': ['VICTIM_TYPES'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}],
'offense_offensetype_victimtype.csv': [
{ # Offenses grouped by offense type
'df': 'offense_single_victimtype_df',
'args': {
'groupby_cols': ['OFFENSE_NAME', 'VICTIM_TYPES'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by offense category
'df': 'offense_single_victimtype_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY', 'VICTIM_TYPES'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offense_offensetype_victimtype_multiple.csv': [
{ # Offenses grouped by offense type
'df': 'offense_multiple_victimtype_df',
'args': {
'groupby_cols': ['OFFENSE_NAME', 'MULTIPLE_VICTIM_TYPE'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by offense category
'df': 'offense_multiple_victimtype_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY', 'MULTIPLE_VICTIM_TYPE'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offense_offensetype_offenderrace.csv': [
{ # Offenses grouped by offense type
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME', 'OFFENDER_RACE'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by offense category
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY', 'OFFENDER_RACE'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offense_offensetype_offenderethnicity.csv': [
{ # Offenses grouped by offense type
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME', 'OFFENDER_ETHNICITY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by offense category
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY', 'OFFENDER_ETHNICITY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offense_offensetype_offendercategory.csv': [
{ # Offenses grouped by offense type
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME', 'OFFENDER_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by offense category
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY', 'OFFENDER_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offenses_offense_bias.csv': [
{ # Offenses by crime type and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by crime type and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by crime type and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENSE_NAME'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses by crime type and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by crime type and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses grouped by crime type and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENSE_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offenses_offenderrace_bias.csv': [
{ # Offenses by offender race and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_RACE', 'BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses by offender race and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENDER_RACE', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses by offender race and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_RACE', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
        { # Offenses by offender race
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_RACE'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offenses_offenderethnicity_bias.csv': [
{ # Offenses by offender ethnicity and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY', 'BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses by offender ethnicity and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses by offender ethnicity and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
{ # Offenses by offender ethnicity
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'offenses_offendertype_bias.csv': [
        { # Offenses by offender category and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_CATEGORY', 'BIAS_DESC'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
        { # Offenses by offender category and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENDER_CATEGORY', 'MULTIPLE_BIAS'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
},
        { # Offenses by offender category and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['OFFENDER_CATEGORY', 'BIAS_CATEGORY'],
'agg_dict': {
'INCIDENT_ID': 'count'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offense'
}
}
],
'victims_bias.csv': [
{ # Victims grouped by bias motivation (anti-white, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_DESC'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
{ # Victims grouped into single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
{ # Victims grouped by bias category (race,religion, gender, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_CATEGORY'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
}
],
'victims_adult_bias.csv': [
{ # Victims grouped by bias motivation (anti-white, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_DESC'],
'agg_dict': {
'ADULT_VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim',
'common_pvs': {
'victimAge': '[18 - Years]'
}
}
},
{ # Victims grouped into single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS'],
'agg_dict': {
'ADULT_VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim',
'common_pvs': {
'victimAge': '[18 - Years]'
}
}
},
{ # Victims grouped by bias category (race,religion, gender, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_CATEGORY'],
'agg_dict': {
'ADULT_VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim',
'common_pvs': {
'victimAge': '[18 - Years]'
}
}
}
],
'victims_juvenile_bias.csv': [
{ # Victims grouped by bias motivation (anti-white, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_DESC'],
'agg_dict': {
'JUVENILE_VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim',
'common_pvs': {
'victimAge': '[- 17 Years]'
}
}
},
{ # Victims grouped into single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS'],
'agg_dict': {
'JUVENILE_VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim',
'common_pvs': {
'victimAge': '[- 17 Years]'
}
}
},
{ # Victims grouped by bias category (race,religion, gender, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_CATEGORY'],
'agg_dict': {
'JUVENILE_VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim',
'common_pvs': {
'victimAge': '[- 17 Years]'
}
}
}
],
'victims_offense.csv': [
{ # Victims by crime type (arson, robbery, ...)
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
{ # Victims by crime category
'df': 'unq_offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
}
],
'victims_offense_bias.csv': [
        { # Victims by crime type and bias motivation
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENSE_NAME'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
        { # Victims grouped by crime type and single bias / multiple bias
'df': 'offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENSE_NAME'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
        { # Victims grouped by crime type and bias category
'df': 'single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENSE_NAME'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
        { # Victims by crime category and bias motivation
'df': 'unq_single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENSE_CATEGORY'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
        { # Victims grouped by crime category and single bias / multiple bias
'df': 'unq_offense_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENSE_CATEGORY'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
},
        { # Victims grouped by crime category and bias category
'df': 'unq_single_bias_offenses',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENSE_CATEGORY'],
'agg_dict': {
'VICTIM_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Victim'
}
}
],
'offenders_bias.csv': [
{ # Offenders grouped by bias motivation (anti-white, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_DESC'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Offenders grouped into single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Offenders grouped by bias category (race,religion, gender, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
}
],
'offenders_type_bias.csv': [
{ # Offenders grouped by bias motivation (anti-white, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_DESC', 'OFFENDER_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Offenders grouped into single bias / multiple bias
'df': 'incident_df',
'args': {
'groupby_cols': ['MULTIPLE_BIAS', 'OFFENDER_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Offenders grouped by bias category (race,religion, gender, ...)
'df': 'single_bias_incidents',
'args': {
'groupby_cols': ['BIAS_CATEGORY', 'OFFENDER_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
}
],
'offenders_offense.csv': [
{ # Offenders by crime type (arson, robbery, ...)
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Offenders by crime category
'df': 'unq_offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
}
],
'offenders_type_offense.csv': [
{ # Offenders by crime type (arson, robbery, ...)
'df': 'offense_df',
'args': {
'groupby_cols': ['OFFENSE_NAME', 'OFFENDER_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
},
{ # Offenders by crime category
'df': 'unq_offense_df',
'args': {
'groupby_cols': ['OFFENSE_CATEGORY', 'OFFENDER_CATEGORY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender'
}
}
],
'offenders_offenderrace.csv': [
        { # Total offenders in incidents where the offender race is known
'df': 'known_offender_race',
'args': {
'groupby_cols': [],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderType': 'KnownOffenderRace'
}
}
},
        { # Total offenders in incidents where the offender age is known
'df': 'known_offender_age',
'args': {
'groupby_cols': [],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderType': 'KnownOffenderAge'
}
}
},
        { # Known offenders grouped by offender race
'df': 'known_offender',
'args': {
'groupby_cols': ['OFFENDER_RACE'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderType': 'KnownOffenderRace'
}
}
}
],
'offenders_offenderethnicity.csv': [
        { # Total offenders in incidents where the offender ethnicity is known
'df': 'known_offender_ethnicity',
'args': {
'groupby_cols': [],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderType': 'KnownOffenderEthnicity'
}
}
},
        { # Known offenders grouped by offender ethnicity
'df': 'known_offender',
'args': {
'groupby_cols': ['OFFENDER_ETHNICITY'],
'agg_dict': {
'TOTAL_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderType': 'KnownOffenderEthnicity'
}
}
}
],
'offenders_offenderadult.csv': [
        { # Total adult offenders (aged 18 years or older)
'df': 'known_offender',
'args': {
'groupby_cols': [],
'agg_dict': {
'ADULT_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderAge': '[18 - Years]',
'offenderType': 'KnownOffenderAge'
}
}
}
],
'offenders_offenderjuvenile.csv': [
        { # Total juvenile offenders (under 18 years)
'df': 'known_offender',
'args': {
'groupby_cols': [],
'agg_dict': {
'JUVENILE_OFFENDER_COUNT': 'sum'
},
'population_type': 'CriminalIncidents',
'measurement_qualifier': 'Offender',
'common_pvs': {
'offenderAge': '[- 17 Years]',
'offenderType': 'KnownOffenderAge'
}
}
}
],
}
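# Illustrative sketch (an assumption, not part of the original script): each entry in the
# config above names a source dataframe ('df') plus the groupby columns and aggregation to
# apply to it. A consumer of a single entry would look roughly like this; the helper name
# is hypothetical, and 'population_type', 'measurement_qualifier' and 'common_pvs'
# (presumably used for statistical-variable metadata elsewhere) are ignored here.
def _apply_aggregation_sketch(df_dict: dict, spec: dict) -> pd.DataFrame:
    source_df = df_dict[spec['df']]
    args = spec['args']
    if args['groupby_cols']:
        return source_df.groupby(args['groupby_cols'], as_index=False).agg(args['agg_dict'])
    # No groupby columns: aggregate over the whole dataframe.
    return source_df.agg(args['agg_dict']).to_frame().T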
def _create_df_dict(df: pd.DataFrame, use_cache: bool = False) -> dict:
"""Applies transformations on the hate crime dataframe. These transformed
dataframes are then used in the aggregations.
Args:
df: A pandas.DataFrame of the hate crime data.
Returns:
A dictionary which has transformation name as key and the transformed
dataframe as it's value.
"""
# Create cache dir if not present
os.makedirs(_CACHE_DIR, exist_ok=True)
fill_unknown_cols = ['OFFENDER_RACE', 'OFFENDER_ETHNICITY']
df['BIAS_CATEGORY'] = ''
df_dict = {}
df[fill_unknown_cols] = df[fill_unknown_cols].fillna('Unknown')
incident_path = os.path.join(_CACHE_DIR, 'incident.csv')
if use_cache and os.path.exists(incident_path):
incident_df = pd.read_csv(incident_path)
else:
incident_df = df.apply(_add_bias_category, axis=1)
incident_df = incident_df.apply(_add_offender_category, axis=1)
incident_df = incident_df.apply(_add_multiple_victims, axis=1)
incident_df = incident_df.apply(_add_multiple_locations, axis=1)
incident_df.to_csv(incident_path, index=False)
df_dict['incident_df'] = incident_df
offense_path = os.path.join(_CACHE_DIR, 'offense.csv')
if use_cache and os.path.exists(offense_path):
offense_df = pd.read_csv(offense_path)
else:
offense_df = flatten_by_column(incident_df, 'OFFENSE_NAME')
offense_df = offense_df.apply(_add_offense_category, axis=1)
offense_df.to_csv(offense_path, index=False)
df_dict['offense_df'] = offense_df
location_path = os.path.join(_CACHE_DIR, 'location.csv')
if use_cache and os.path.exists(location_path):
location_df = pd.read_csv(location_path)
else:
location_df = flatten_by_column(incident_df, 'LOCATION_NAME')
location_df.to_csv(location_path, index=False)
df_dict['location_df'] = location_df
victim_path = os.path.join(_CACHE_DIR, 'victim.csv')
if use_cache and os.path.exists(victim_path):
victim_df = pd.read_csv(victim_path)
else:
victim_df = flatten_by_column(incident_df, 'VICTIM_TYPES')
victim_df.to_csv(victim_path, index=False)
df_dict['victim_df'] = victim_df
offense_victim_path = os.path.join(_CACHE_DIR, 'offense_victim.csv')
if use_cache and os.path.exists(offense_victim_path):
offense_victim_df = pd.read_csv(offense_victim_path)
else:
offense_victim_df = flatten_by_column(offense_df, 'VICTIM_TYPES')
        offense_victim_df.to_csv(offense_victim_path, index=False)
df_dict['offense_victim_df'] = offense_victim_df
sb_incidents_path = os.path.join(_CACHE_DIR, 'sb_incidents.csv')
if use_cache and os.path.exists(sb_incidents_path):
        single_bias_incidents = pd.read_csv(sb_incidents_path)
import pandas as pd
scores = pd.DataFrame({
    'Physics': pd.Series([15, 12, 8, 8, 7, 7, 7, 6, 5, 3]),
})
import os
import math
from tqdm import tqdm
import textwrap
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.patches as patches
import matplotlib.pyplot as plt
def compute_nb_days(db, start):
"""
Compute the number of days of the project (days between start and the date of the last action)
:param db:
:param start:
:return:
"""
nb_days = 0
for task_id in db.get_task_ids():
task_history = db.get_task_history()[str(task_id)]['taskHistory']
if len(task_history) > 0:
date1 = pd.to_datetime(task_history[0]['actionDate']).date()
            date2 = pd.to_datetime(task_history[-1]['actionDate'])
import tempfile
from pathlib import Path
import pandas as pd
import pytest
from hypothesis import settings
from autorad.config import config
from autorad.data.dataset import FeatureDataset
settings.register_profile("fast", max_examples=2)
settings.register_profile("slow", max_examples=10)
prostate_root = Path(config.TEST_DATA_DIR) / "nifti" / "prostate"
prostate_data = {
"img": prostate_root / "img.nii.gz",
"seg": prostate_root / "seg_one_label.nii.gz",
"seg_two_labels": prostate_root / "seg_two_labels.nii.gz",
"empty_seg": prostate_root / "seg_empty.nii.gz",
}
@pytest.fixture
def small_paths_df():
paths_df = pd.DataFrame(
{
"ID": ["case_1_single_label", "case_2_two_labels"],
"img": [prostate_data["img"], prostate_data["img"]],
"seg": [prostate_data["seg"], prostate_data["seg_two_labels"]],
}
)
return paths_df
@pytest.fixture
def empty_df():
    df = pd.DataFrame()
    return df
import copy
import os
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.exceptions import NotFittedError
from sklearn.inspection import partial_dependence as sk_partial_dependence
from sklearn.inspection._partial_dependence import (
_grid_from_X,
_partial_dependence_brute,
)
from sklearn.manifold import TSNE
from sklearn.metrics import auc as sklearn_auc
from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix
from sklearn.metrics import (
precision_recall_curve as sklearn_precision_recall_curve,
)
from sklearn.metrics import roc_curve as sklearn_roc_curve
from sklearn.preprocessing import LabelBinarizer
from sklearn.tree import export_graphviz
from sklearn.utils.multiclass import unique_labels
import evalml
from evalml.exceptions import NoPositiveLabelException, NullsInColumnWarning
from evalml.model_family import ModelFamily
from evalml.model_understanding.permutation_importance import (
calculate_permutation_importance,
)
from evalml.objectives.utils import get_objective
from evalml.problem_types import ProblemTypes
from evalml.utils import import_or_raise, infer_feature_types, jupyter_check
def confusion_matrix(y_true, y_predicted, normalize_method="true"):
"""Confusion matrix for binary and multiclass classification.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred (pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: Confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
y_true = infer_feature_types(y_true)
y_predicted = infer_feature_types(y_predicted)
y_true = y_true.to_numpy()
y_predicted = y_predicted.to_numpy()
labels = unique_labels(y_true, y_predicted)
conf_mat = sklearn_confusion_matrix(y_true, y_predicted)
conf_mat = pd.DataFrame(conf_mat, index=labels, columns=labels)
if normalize_method is not None:
return normalize_confusion_matrix(conf_mat, normalize_method=normalize_method)
return conf_mat
def normalize_confusion_matrix(conf_mat, normalize_method="true"):
"""Normalizes a confusion matrix.
Arguments:
conf_mat (pd.DataFrame or np.ndarray): Confusion matrix to normalize.
normalize_method ({'true', 'pred', 'all'}): Normalization method. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: normalized version of the input confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
conf_mat = infer_feature_types(conf_mat)
col_names = conf_mat.columns
conf_mat = conf_mat.to_numpy()
with warnings.catch_warnings(record=True) as w:
if normalize_method == "true":
conf_mat = conf_mat.astype("float") / conf_mat.sum(axis=1)[:, np.newaxis]
elif normalize_method == "pred":
conf_mat = conf_mat.astype("float") / conf_mat.sum(axis=0)
elif normalize_method == "all":
conf_mat = conf_mat.astype("float") / conf_mat.sum().sum()
else:
raise ValueError(
'Invalid value provided for "normalize_method": {}'.format(
normalize_method
)
)
if w and "invalid value encountered in" in str(w[0].message):
raise ValueError(
"Sum of given axis is 0 and normalization is not possible. Please select another option."
)
conf_mat = pd.DataFrame(conf_mat, index=col_names, columns=col_names)
return conf_mat
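def _confusion_matrix_usage_sketch():
    # Illustrative usage sketch with made-up labels (not part of evalml itself).
    y_true = pd.Series([1, 0, 1, 1, 0])
    y_predicted = pd.Series([1, 0, 0, 1, 0])
    # Rows are true labels, columns are predictions; with normalize_method="true"
    # each row sums to 1.
    return confusion_matrix(y_true, y_predicted, normalize_method="true")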
def graph_confusion_matrix(
y_true, y_pred, normalize_method="true", title_addition=None
):
"""Generate and display a confusion matrix plot.
If `normalize_method` is set, hover text will show raw count, otherwise hover text will show count normalized with method 'true'.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred (pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
title_addition (str or None): if not None, append to plot title. Defaults to None.
Returns:
plotly.Figure representing the confusion matrix plot generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
_ff = import_or_raise(
"plotly.figure_factory",
error_msg="Cannot find dependency plotly.figure_factory",
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
conf_mat = confusion_matrix(y_true, y_pred, normalize_method=None)
conf_mat_normalized = confusion_matrix(
y_true, y_pred, normalize_method=normalize_method or "true"
)
labels = conf_mat.columns.tolist()
title = "Confusion matrix{}{}".format(
"" if title_addition is None else (" " + title_addition),
""
if normalize_method is None
else (', normalized using method "' + normalize_method + '"'),
)
z_data, custom_data = (
(conf_mat, conf_mat_normalized)
if normalize_method is None
else (conf_mat_normalized, conf_mat)
)
z_data = z_data.to_numpy()
z_text = [["{:.3f}".format(y) for y in x] for x in z_data]
primary_heading, secondary_heading = (
("Raw", "Normalized") if normalize_method is None else ("Normalized", "Raw")
)
hover_text = (
"<br><b>"
+ primary_heading
+ " Count</b>: %{z}<br><b>"
+ secondary_heading
+ " Count</b>: %{customdata} <br>"
)
# the "<extra> tags at the end are necessary to remove unwanted trace info
hover_template = (
"<b>True</b>: %{y}<br><b>Predicted</b>: %{x}" + hover_text + "<extra></extra>"
)
layout = _go.Layout(
title={"text": title},
xaxis={"title": "Predicted Label", "type": "category", "tickvals": labels},
yaxis={"title": "True Label", "type": "category", "tickvals": labels},
)
fig = _ff.create_annotated_heatmap(
z_data,
x=labels,
y=labels,
annotation_text=z_text,
customdata=custom_data,
hovertemplate=hover_template,
colorscale="Blues",
showscale=True,
)
fig.update_layout(layout)
# put xaxis text on bottom to not overlap with title
fig["layout"]["xaxis"].update(side="bottom")
# plotly Heatmap y axis defaults to the reverse of what we want: https://community.plotly.com/t/heatmap-y-axis-is-reversed-by-default-going-against-standard-convention-for-matrices/32180
fig.update_yaxes(autorange="reversed")
return fig
def precision_recall_curve(y_true, y_pred_proba, pos_label_idx=-1):
"""
Given labels and binary classifier predicted probabilities, compute and return the data representing a precision-recall curve.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
pos_label_idx (int): the column index corresponding to the positive class. If predicted probabilities are two-dimensional, this will be used to access the probabilities for the positive class.
Returns:
list: Dictionary containing metrics used to generate a precision-recall plot, with the following keys:
* `precision`: Precision values.
* `recall`: Recall values.
* `thresholds`: Threshold values used to produce the precision and recall.
* `auc_score`: The area under the ROC curve.
"""
y_true = infer_feature_types(y_true)
y_pred_proba = infer_feature_types(y_pred_proba)
if isinstance(y_pred_proba, pd.DataFrame):
y_pred_proba_shape = y_pred_proba.shape
try:
y_pred_proba = y_pred_proba.iloc[:, pos_label_idx]
except IndexError:
raise NoPositiveLabelException(
f"Predicted probabilities of shape {y_pred_proba_shape} don't contain a column at index {pos_label_idx}"
)
precision, recall, thresholds = sklearn_precision_recall_curve(y_true, y_pred_proba)
auc_score = sklearn_auc(recall, precision)
return {
"precision": precision,
"recall": recall,
"thresholds": thresholds,
"auc_score": auc_score,
}
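def _precision_recall_usage_sketch():
    # Illustrative usage sketch with made-up probabilities (not part of evalml itself).
    y_true = pd.Series([0, 0, 1, 1])
    y_pred_proba = pd.Series([0.1, 0.4, 0.35, 0.8])
    curve = precision_recall_curve(y_true, y_pred_proba)
    # "precision" and "recall" trace the curve; "auc_score" summarizes it.
    return curve["auc_score"]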
def graph_precision_recall_curve(y_true, y_pred_proba, title_addition=None):
"""Generate and display a precision-recall plot.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
title_addition (str or None): If not None, append to plot title. Default None.
Returns:
plotly.Figure representing the precision-recall plot generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
precision_recall_curve_data = precision_recall_curve(y_true, y_pred_proba)
title = "Precision-Recall{}".format(
"" if title_addition is None else (" " + title_addition)
)
layout = _go.Layout(
title={"text": title},
xaxis={"title": "Recall", "range": [-0.05, 1.05]},
yaxis={"title": "Precision", "range": [-0.05, 1.05]},
)
data = []
data.append(
_go.Scatter(
x=precision_recall_curve_data["recall"],
y=precision_recall_curve_data["precision"],
name="Precision-Recall (AUC {:06f})".format(
precision_recall_curve_data["auc_score"]
),
line=dict(width=3),
)
)
return _go.Figure(layout=layout, data=data)
def roc_curve(y_true, y_pred_proba):
"""
Given labels and classifier predicted probabilities, compute and return the data representing a Receiver Operating Characteristic (ROC) curve. Works with binary or multiclass problems.
Arguments:
y_true (pd.Series or np.ndarray): True labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied.
Returns:
list(dict): A list of dictionaries (with one for each class) is returned. Binary classification problems return a list with one dictionary.
Each dictionary contains metrics used to generate an ROC plot with the following keys:
* `fpr_rate`: False positive rate.
* `tpr_rate`: True positive rate.
* `threshold`: Threshold values used to produce each pair of true/false positive rates.
* `auc_score`: The area under the ROC curve.
"""
y_true = infer_feature_types(y_true).to_numpy()
y_pred_proba = infer_feature_types(y_pred_proba).to_numpy()
if len(y_pred_proba.shape) == 1:
y_pred_proba = y_pred_proba.reshape(-1, 1)
if y_pred_proba.shape[1] == 2:
y_pred_proba = y_pred_proba[:, 1].reshape(-1, 1)
nan_indices = np.logical_or(pd.isna(y_true), np.isnan(y_pred_proba).any(axis=1))
y_true = y_true[~nan_indices]
y_pred_proba = y_pred_proba[~nan_indices]
lb = LabelBinarizer()
lb.fit(np.unique(y_true))
y_one_hot_true = lb.transform(y_true)
n_classes = y_one_hot_true.shape[1]
curve_data = []
for i in range(n_classes):
fpr_rates, tpr_rates, thresholds = sklearn_roc_curve(
y_one_hot_true[:, i], y_pred_proba[:, i]
)
auc_score = sklearn_auc(fpr_rates, tpr_rates)
curve_data.append(
{
"fpr_rates": fpr_rates,
"tpr_rates": tpr_rates,
"thresholds": thresholds,
"auc_score": auc_score,
}
)
return curve_data
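def _roc_curve_usage_sketch():
    # Illustrative usage sketch with made-up multiclass probabilities (not part of evalml itself).
    y_true = pd.Series([0, 1, 2, 1])
    y_pred_proba = pd.DataFrame(
        [[0.7, 0.2, 0.1], [0.2, 0.6, 0.2], [0.1, 0.2, 0.7], [0.3, 0.4, 0.3]]
    )
    # Returns one dict per class, each with fpr_rates, tpr_rates, thresholds and auc_score.
    return roc_curve(y_true, y_pred_proba)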
def graph_roc_curve(y_true, y_pred_proba, custom_class_names=None, title_addition=None):
"""Generate and display a Receiver Operating Characteristic (ROC) plot for binary and multiclass classification problems.
Arguments:
y_true (pd.Series or np.ndarray): True labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied. Note this should a one dimensional array with the predicted probability for the "true" label in the binary case.
custom_class_labels (list or None): If not None, custom labels for classes. Default None.
title_addition (str or None): if not None, append to plot title. Default None.
Returns:
plotly.Figure representing the ROC plot generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
title = "Receiver Operating Characteristic{}".format(
"" if title_addition is None else (" " + title_addition)
)
layout = _go.Layout(
title={"text": title},
xaxis={"title": "False Positive Rate", "range": [-0.05, 1.05]},
yaxis={"title": "True Positive Rate", "range": [-0.05, 1.05]},
)
all_curve_data = roc_curve(y_true, y_pred_proba)
graph_data = []
n_classes = len(all_curve_data)
if custom_class_names and len(custom_class_names) != n_classes:
raise ValueError(
"Number of custom class names does not match number of classes"
)
for i in range(n_classes):
roc_curve_data = all_curve_data[i]
name = i + 1 if custom_class_names is None else custom_class_names[i]
graph_data.append(
_go.Scatter(
x=roc_curve_data["fpr_rates"],
y=roc_curve_data["tpr_rates"],
hovertemplate="(False Postive Rate: %{x}, True Positive Rate: %{y})<br>"
+ "Threshold: %{text}",
name=f"Class {name} (AUC {roc_curve_data['auc_score']:.06f})",
text=roc_curve_data["thresholds"],
line=dict(width=3),
)
)
graph_data.append(
_go.Scatter(
x=[0, 1], y=[0, 1], name="Trivial Model (AUC 0.5)", line=dict(dash="dash")
)
)
return _go.Figure(layout=layout, data=graph_data)
def graph_permutation_importance(pipeline, X, y, objective, importance_threshold=0):
"""Generate a bar graph of the pipeline's permutation importance.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame): The input data used to score and compute permutation importance
y (pd.Series): The target data
objective (str, ObjectiveBase): Objective to score on
importance_threshold (float, optional): If provided, graph features with a permutation importance whose absolute value is larger than importance_threshold. Defaults to zero.
Returns:
plotly.Figure, a bar graph showing features and their respective permutation importance.
"""
go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
perm_importance = calculate_permutation_importance(pipeline, X, y, objective)
perm_importance["importance"] = perm_importance["importance"]
if importance_threshold < 0:
raise ValueError(
f"Provided importance threshold of {importance_threshold} must be greater than or equal to 0"
)
# Remove features with close to zero importance
perm_importance = perm_importance[
abs(perm_importance["importance"]) >= importance_threshold
]
# List is reversed to go from ascending order to descending order
perm_importance = perm_importance.iloc[::-1]
title = "Permutation Importance"
subtitle = (
"The relative importance of each input feature's "
"overall influence on the pipelines' predictions, computed using "
"the permutation importance algorithm."
)
data = [
go.Bar(
x=perm_importance["importance"],
y=perm_importance["feature"],
orientation="h",
)
]
layout = {
"title": "{0}<br><sub>{1}</sub>".format(title, subtitle),
"height": 800,
"xaxis_title": "Permutation Importance",
"yaxis_title": "Feature",
"yaxis": {"type": "category"},
}
fig = go.Figure(data=data, layout=layout)
return fig
def binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Computes objective score as a function of potential binary classification
decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (BinaryClassificationPipeline obj): Fitted binary classification pipeline
X (pd.DataFrame): The input data used to compute objective score
y (pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score
steps (int): Number of intervals to divide and calculate objective score at
Returns:
pd.DataFrame: DataFrame with thresholds and the corresponding objective score calculated at each threshold
"""
objective = get_objective(objective, return_instance=True)
if not objective.is_defined_for_problem_type(ProblemTypes.BINARY):
raise ValueError(
"`binary_objective_vs_threshold` can only be calculated for binary classification objectives"
)
if objective.score_needs_proba:
raise ValueError("Objective `score_needs_proba` must be False")
pipeline_tmp = copy.copy(pipeline)
thresholds = np.linspace(0, 1, steps + 1)
costs = []
for threshold in thresholds:
pipeline_tmp.threshold = threshold
scores = pipeline_tmp.score(X, y, [objective])
costs.append(scores[objective.name])
df = pd.DataFrame({"threshold": thresholds, "score": costs})
return df
def graph_binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Generates a plot graphing objective score vs. decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame): The input data used to score and compute scores
y (pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score, shown on the y-axis of the graph
steps (int): Number of intervals to divide and calculate objective score at
Returns:
plotly.Figure representing the objective score vs. threshold graph generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
objective = get_objective(objective, return_instance=True)
df = binary_objective_vs_threshold(pipeline, X, y, objective, steps)
title = f"{objective.name} Scores vs. Thresholds"
layout = _go.Layout(
title={"text": title},
xaxis={"title": "Threshold", "range": _calculate_axis_range(df["threshold"])},
yaxis={
"title": f"{objective.name} Scores vs. Binary Classification Decision Threshold",
"range": _calculate_axis_range(df["score"]),
},
)
data = []
data.append(_go.Scatter(x=df["threshold"], y=df["score"], line=dict(width=3)))
return _go.Figure(layout=layout, data=data)
def _is_feature_of_type(feature, X, ltype):
"""Determine whether the feature the user passed in to partial dependence is a Woodwork logical type."""
if isinstance(feature, int):
is_type = isinstance(X.ww.logical_types[X.columns[feature]], ltype)
else:
is_type = isinstance(X.ww.logical_types[feature], ltype)
return is_type
def _put_categorical_feature_first(features, first_feature_categorical):
"""If the user is doing a two-way partial dependence plot and one of the features is categorical,
we need to make sure the categorical feature is the first element in the tuple that's passed to sklearn.
This is because in the two-way grid calculation, sklearn will try to coerce every element of the grid to the
type of the first feature in the tuple. If we put the categorical feature first, the grid will be of type 'object'
which can accommodate both categorical and numeric data. If we put the numeric feature first, the grid will be of
type float64 and we can't coerce categoricals to float64 dtype.
"""
new_features = features if first_feature_categorical else (features[1], features[0])
return new_features
def _get_feature_names_from_str_or_col_index(X, names_or_col_indices):
"""Helper function to map the user-input features param to column names."""
feature_list = []
for name_or_index in names_or_col_indices:
if isinstance(name_or_index, int):
feature_list.append(X.columns[name_or_index])
else:
feature_list.append(name_or_index)
return feature_list
def _raise_value_error_if_any_features_all_nan(df):
"""Helper for partial dependence data validation."""
nan_pct = df.isna().mean()
all_nan = nan_pct[nan_pct == 1].index.tolist()
all_nan = [f"'{name}'" for name in all_nan]
if all_nan:
raise ValueError(
"The following features have all NaN values and so the "
f"partial dependence cannot be computed: {', '.join(all_nan)}"
)
def _raise_value_error_if_mostly_one_value(df, percentile):
"""Helper for partial dependence data validation."""
one_value = []
values = []
for col in df.columns:
normalized_counts = df[col].value_counts(normalize=True) + 0.01
normalized_counts = normalized_counts[normalized_counts > percentile]
if not normalized_counts.empty:
one_value.append(f"'{col}'")
values.append(str(normalized_counts.index[0]))
if one_value:
raise ValueError(
f"Features ({', '.join(one_value)}) are mostly one value, ({', '.join(values)}), "
f"and cannot be used to compute partial dependence. Try raising the upper percentage value."
)
def partial_dependence(
pipeline, X, features, percentiles=(0.05, 0.95), grid_resolution=100, kind="average"
):
"""Calculates one or two-way partial dependence. If a single integer or
string is given for features, one-way partial dependence is calculated. If
a tuple of two integers or strings is given, two-way partial dependence
is calculated with the first feature in the y-axis and second feature in the
x-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
features (int, string, tuple[int or string]): The target feature for which to create the partial dependence plot for.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
If features is a tuple of int/strings, it must contain valid column integers/names in X.
percentiles (tuple[float]): The lower and upper percentile used to create the extreme values for the grid.
Must be in [0, 1]. Defaults to (0.05, 0.95).
grid_resolution (int): Number of samples of feature(s) for partial dependence plot. If this value
is less than the maximum number of categories present in categorical data within X, it will be
set to the max number of categories + 1. Defaults to 100.
kind {'average', 'individual', 'both'}: The type of predictions to return. 'individual' will return the predictions for
all of the points in the grid for each sample in X. 'average' will return the predictions for all of the points in
the grid but averaged over all of the samples in X.
Returns:
pd.DataFrame, list(pd.DataFrame), or tuple(pd.DataFrame, list(pd.DataFrame)):
When `kind='average'`: DataFrame with averaged predictions for all points in the grid averaged
over all samples of X and the values used to calculate those predictions.
When `kind='individual'`: DataFrame with individual predictions for all points in the grid for each sample
of X and the values used to calculate those predictions. If a two-way partial dependence is calculated, then
the result is a list of DataFrames with each DataFrame representing one sample's predictions.
When `kind='both'`: A tuple consisting of the averaged predictions (in a DataFrame) over all samples of X and the individual
predictions (in a list of DataFrames) for each sample of X.
In the one-way case: The dataframe will contain two columns, "feature_values" (grid points at which the
partial dependence was calculated) and "partial_dependence" (the partial dependence at that feature value).
For classification problems, there will be a third column called "class_label" (the class label for which
the partial dependence was calculated). For binary classification, the partial dependence is only calculated
for the "positive" class.
In the two-way case: The data frame will contain grid_resolution number of columns and rows where the
index and column headers are the sampled values of the first and second features, respectively, used to make
the partial dependence contour. The values of the data frame contain the partial dependence data for each
feature value pair.
Raises:
ValueError: if the user provides a tuple of not exactly two features.
ValueError: if the provided pipeline isn't fitted.
ValueError: if the provided pipeline is a Baseline pipeline.
ValueError: if any of the features passed in are completely NaN
ValueError: if any of the features are low-variance. Defined as having one value occurring more than the upper
percentile passed by the user. By default 95%.
"""
# Dynamically set the grid resolution to the maximum number of values
# in the categorical/datetime variables if there are more categories/datetime values than resolution cells
X = infer_feature_types(X)
if isinstance(features, (list, tuple)):
is_categorical = [
_is_feature_of_type(f, X, ww.logical_types.Categorical) for f in features
]
is_datetime = [
_is_feature_of_type(f, X, ww.logical_types.Datetime) for f in features
]
else:
is_categorical = [
_is_feature_of_type(features, X, ww.logical_types.Categorical)
]
is_datetime = [_is_feature_of_type(features, X, ww.logical_types.Datetime)]
if isinstance(features, (list, tuple)):
if len(features) != 2:
raise ValueError(
"Too many features given to graph_partial_dependence. Only one or two-way partial "
"dependence is supported."
)
if not (
all([isinstance(x, str) for x in features])
or all([isinstance(x, int) for x in features])
):
raise ValueError(
"Features provided must be a tuple entirely of integers or strings, not a mixture of both."
)
X_features = (
X.ww.iloc[:, list(features)]
if isinstance(features[0], int)
else X.ww[list(features)]
)
else:
X_features = (
X.ww.iloc[:, [features]] if isinstance(features, int) else X.ww[[features]]
)
X_unknown = X_features.ww.select("unknown")
if len(X_unknown.columns):
# We drop the unknown columns in the pipelines, so we cannot calculate partial dependence for these
raise ValueError(
f"Columns {X_unknown.columns.values} are of type 'Unknown', which cannot be used for partial dependence"
)
X_cats = X_features.ww.select("categorical")
if any(is_categorical):
max_num_cats = max(X_cats.ww.describe().loc["nunique"])
grid_resolution = max([max_num_cats + 1, grid_resolution])
X_dt = X_features.ww.select("datetime")
if isinstance(features, (list, tuple)):
feature_names = _get_feature_names_from_str_or_col_index(X, features)
if any(is_datetime):
raise ValueError(
"Two-way partial dependence is not supported for datetime columns."
)
if any(is_categorical):
features = _put_categorical_feature_first(features, is_categorical[0])
else:
feature_names = _get_feature_names_from_str_or_col_index(X, [features])
if not pipeline._is_fitted:
raise ValueError("Pipeline to calculate partial dependence for must be fitted")
if pipeline.model_family == ModelFamily.BASELINE:
raise ValueError(
"Partial dependence plots are not supported for Baseline pipelines"
)
feature_list = X[feature_names]
_raise_value_error_if_any_features_all_nan(feature_list)
if feature_list.isnull().sum().any():
warnings.warn(
"There are null values in the features, which will cause NaN values in the partial dependence output. "
"Fill in these values to remove the NaN values.",
NullsInColumnWarning,
)
_raise_value_error_if_mostly_one_value(feature_list, percentiles[1])
wrapped = evalml.pipelines.components.utils.scikit_learn_wrapped_estimator(pipeline)
try:
if any(is_datetime):
timestamps = np.array(
[X_dt - pd.Timestamp("1970-01-01")] // np.timedelta64(1, "s")
).reshape(-1, 1)
grid, values = _grid_from_X(
timestamps, percentiles=percentiles, grid_resolution=grid_resolution
)
grid_dates = pd.to_datetime(
pd.Series(grid.squeeze()), unit="s"
).values.reshape(-1, 1)
# convert values to dates for the output
value_dates = pd.to_datetime(pd.Series(values[0]), unit="s")
# need to pass in the feature as an int index rather than string
feature_index = (
X.columns.tolist().index(features)
if isinstance(features, str)
else features
)
averaged_predictions, predictions = _partial_dependence_brute(
wrapped, grid_dates, [feature_index], X, response_method="auto"
)
# reshape based on the way scikit-learn reshapes the data
predictions = predictions.reshape(
-1, X.shape[0], *[val.shape[0] for val in values]
)
averaged_predictions = averaged_predictions.reshape(
-1, *[val.shape[0] for val in values]
)
preds = {
"average": averaged_predictions,
"individual": predictions,
"values": [value_dates],
}
else:
preds = sk_partial_dependence(
wrapped,
X=X,
features=features,
percentiles=percentiles,
grid_resolution=grid_resolution,
kind=kind,
)
except ValueError as e:
if "percentiles are too close to each other" in str(e):
raise ValueError(
"The scale of these features is too small and results in"
"percentiles that are too close together. Partial dependence"
"cannot be computed for these types of features. Consider"
"scaling the features so that they differ by > 10E-7"
)
else:
raise e
classes = None
if isinstance(pipeline, evalml.pipelines.BinaryClassificationPipeline):
classes = [pipeline.classes_[1]]
elif isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
classes = pipeline.classes_
values = preds["values"]
if kind in ["average", "both"]:
avg_pred = preds["average"]
if isinstance(features, (int, str)):
avg_data = pd.DataFrame(
{
"feature_values": np.tile(values[0], avg_pred.shape[0]),
"partial_dependence": np.concatenate([pred for pred in avg_pred]),
}
)
elif isinstance(features, (list, tuple)):
avg_data = pd.DataFrame(avg_pred.reshape((-1, avg_pred.shape[-1])))
avg_data.columns = values[1]
avg_data.index = np.tile(values[0], avg_pred.shape[0])
if classes is not None:
avg_data["class_label"] = np.repeat(classes, len(values[0]))
if kind in ["individual", "both"]:
ind_preds = preds["individual"]
if isinstance(features, (int, str)):
ind_data = list()
for label in ind_preds:
ind_data.append(pd.DataFrame(label).T)
ind_data = pd.concat(ind_data)
ind_data.columns = [f"Sample {i}" for i in range(len(ind_preds[0]))]
if classes is not None:
ind_data["class_label"] = np.repeat(classes, len(values[0]))
ind_data.insert(0, "feature_values", np.tile(values[0], ind_preds.shape[0]))
elif isinstance(features, (list, tuple)):
ind_data = list()
for n, label in enumerate(ind_preds):
for i, sample in enumerate(label):
ind_df = pd.DataFrame(sample.reshape((-1, sample.shape[-1])))
ind_df.columns = values[1]
ind_df.index = values[0]
if n == 0:
ind_data.append(ind_df)
else:
ind_data[i] = pd.concat([ind_data[i], ind_df])
for sample in ind_data:
sample["class_label"] = np.repeat(classes, len(values[0]))
if kind == "both":
return (avg_data, ind_data)
elif kind == "individual":
return ind_data
elif kind == "average":
return avg_data
def _update_fig_with_two_way_partial_dependence(
_go,
fig,
label_df,
part_dep,
features,
is_categorical,
label=None,
row=None,
col=None,
):
"""Helper for formatting the two-way partial dependence plot."""
y = label_df.index
x = label_df.columns
z = label_df.values
if not any(is_categorical):
# No features are categorical. In this case, we pass both x and y data to the Contour plot so that
# plotly can figure out the axis formatting for us.
kwargs = {"x": x, "y": y}
fig.update_xaxes(
title=f"{features[1]}",
range=_calculate_axis_range(
np.array([x for x in part_dep.columns if x != "class_label"])
),
row=row,
col=col,
)
fig.update_yaxes(range=_calculate_axis_range(part_dep.index), row=row, col=col)
elif sum(is_categorical) == 1:
# One feature is categorical. Since we put the categorical feature first, the numeric feature will be the x
# axis. So we pass the x to the Contour plot so that plotly can format it for us.
# Since the y axis is a categorical value, we will set the y tickmarks ourselves. Passing y to the contour plot
# in this case will "work" but the formatting will look bad.
kwargs = {"x": x}
fig.update_xaxes(
title=f"{features[1]}",
range=_calculate_axis_range(
np.array([x for x in part_dep.columns if x != "class_label"])
),
row=row,
col=col,
)
fig.update_yaxes(
tickmode="array",
tickvals=list(range(label_df.shape[0])),
ticktext=list(label_df.index),
row=row,
col=col,
)
else:
# Both features are categorical so we must format both axes ourselves.
kwargs = {}
fig.update_yaxes(
tickmode="array",
tickvals=list(range(label_df.shape[0])),
ticktext=list(label_df.index),
row=row,
col=col,
)
fig.update_xaxes(
tickmode="array",
tickvals=list(range(label_df.shape[1])),
ticktext=list(label_df.columns),
row=row,
col=col,
)
fig.add_trace(
_go.Contour(z=z, name=label, coloraxis="coloraxis", **kwargs), row=row, col=col
)
def graph_partial_dependence(
pipeline, X, features, class_label=None, grid_resolution=100, kind="average"
):
"""Create an one-way or two-way partial dependence plot. Passing a single integer or
string as features will create a one-way partial dependence plot with the feature values
plotted against the partial dependence. Passing features a tuple of int/strings will create
a two-way partial dependence plot with a contour of feature[0] in the y-axis, feature[1]
in the x-axis and the partial dependence in the z-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
features (int, string, tuple[int or string]): The target feature for which to create the partial dependence plot for.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
If features is a tuple of strings, it must contain valid column int/names in X.
class_label (string, optional): Name of class to plot for multiclass problems. If None, will plot
the partial dependence for each class. This argument does not change behavior for regression or binary
classification pipelines. For binary classification, the partial dependence for the positive label will
always be displayed. Defaults to None.
grid_resolution (int): Number of samples of feature(s) for partial dependence plot
kind {'average', 'individual', 'both'}: Type of partial dependence to plot. 'average' creates a regular partial dependence
(PD) graph, 'individual' creates an individual conditional expectation (ICE) plot, and 'both' creates a
single-figure PD and ICE plot. ICE plots can only be shown for one-way partial dependence plots.
Returns:
plotly.graph_objects.Figure: figure object containing the partial dependence data for plotting
Raises:
ValueError: if a graph is requested for a class name that isn't present in the pipeline
"""
X = infer_feature_types(X)
if isinstance(features, (list, tuple)):
mode = "two-way"
is_categorical = [
_is_feature_of_type(f, X, ww.logical_types.Categorical) for f in features
]
if any(is_categorical):
features = _put_categorical_feature_first(features, is_categorical[0])
if kind == "individual" or kind == "both":
raise ValueError(
"Individual conditional expectation plot can only be created with a one-way partial dependence plot"
)
elif isinstance(features, (int, str)):
mode = "one-way"
is_categorical = _is_feature_of_type(features, X, ww.logical_types.Categorical)
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
if (
isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline)
and class_label is not None
):
if class_label not in pipeline.classes_:
msg = f"Class {class_label} is not one of the classes the pipeline was fit on: {', '.join(list(pipeline.classes_))}"
raise ValueError(msg)
part_dep = partial_dependence(
pipeline, X, features=features, grid_resolution=grid_resolution, kind=kind
)
ice_data = None
if kind == "both":
part_dep, ice_data = part_dep
elif kind == "individual":
ice_data = part_dep
part_dep = None
if mode == "two-way":
title = f"Partial Dependence of '{features[0]}' vs. '{features[1]}'"
layout = _go.Layout(
title={"text": title},
xaxis={"title": f"{features[1]}"},
yaxis={"title": f"{features[0]}"},
showlegend=True,
)
elif mode == "one-way":
feature_name = str(features)
if kind == "individual":
title = f"Individual Conditional Expectation of '{feature_name}'"
elif kind == "average":
title = f"Partial Dependence of '{feature_name}'"
else:
title = f"Partial Dependence of '{feature_name}' <br><sub>Including Individual Conditional Expectation Plot</sub>"
layout = _go.Layout(
title={"text": title},
xaxis={"title": f"{feature_name}"},
yaxis={"title": "Partial Dependence"},
showlegend=True,
)
fig = _go.Figure(layout=layout)
if isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
class_labels = [class_label] if class_label is not None else pipeline.classes_
_subplots = import_or_raise(
"plotly.subplots", error_msg="Cannot find dependency plotly.graph_objects"
)
# If the user passes in a value for class_label, we want to create a 1 x 1 subplot or else there would
# be an empty column in the plot and it would look awkward
rows, cols = (
((len(class_labels) + 1) // 2, 2)
if len(class_labels) > 1
else (1, len(class_labels))
)
class_labels_mapping = {
class_label: str(class_label) for class_label in class_labels
}
# Don't specify share_xaxis and share_yaxis so that we get tickmarks in each subplot
fig = _subplots.make_subplots(rows=rows, cols=cols, subplot_titles=class_labels)
for i, label in enumerate(class_labels):
label_df = (
part_dep.loc[part_dep.class_label == label]
if part_dep is not None
else ice_data.loc[ice_data.class_label == label]
)
row = (i + 2) // 2
col = (i % 2) + 1
if ice_data is not None and kind == "individual":
fig = _add_ice_plot(_go, fig, ice_data, row=row, col=col, label=label)
else:
label_df.drop(columns=["class_label"], inplace=True)
if mode == "two-way":
_update_fig_with_two_way_partial_dependence(
_go,
fig,
label_df,
part_dep,
features,
is_categorical,
label,
row,
col,
)
elif mode == "one-way":
x = label_df["feature_values"]
y = label_df["partial_dependence"]
if is_categorical:
trace = _go.Bar(x=x, y=y, name=label)
else:
if ice_data is not None:
fig = _add_ice_plot(
_go, fig, ice_data, row=row, col=col, label=label
)
trace = _go.Scatter(
x=x,
y=y,
line=dict(width=3, color="rgb(99,110,250)"),
name="Partial Dependence: " + class_labels_mapping[label],
)
fig.add_trace(trace, row=row, col=col)
fig.update_layout(layout)
if mode == "two-way":
fig.update_layout(coloraxis=dict(colorscale="Bluered_r"), showlegend=False)
elif mode == "one-way":
title = f"{feature_name}"
x_scale_df = (
part_dep["feature_values"]
if part_dep is not None
else ice_data["feature_values"]
)
xrange = _calculate_axis_range(x_scale_df) if not is_categorical else None
yrange = _calculate_axis_range(
ice_data.drop("class_label", axis=1)
if ice_data is not None
else part_dep["partial_dependence"]
)
fig.update_xaxes(title=title, range=xrange)
fig.update_yaxes(range=yrange)
elif kind == "individual" and ice_data is not None:
fig = _add_ice_plot(_go, fig, ice_data)
elif part_dep is not None:
if ice_data is not None and not is_categorical:
fig = _add_ice_plot(_go, fig, ice_data)
if "class_label" in part_dep.columns:
part_dep.drop(columns=["class_label"], inplace=True)
if mode == "two-way":
_update_fig_with_two_way_partial_dependence(
_go,
fig,
part_dep,
part_dep,
features,
is_categorical,
label="Partial Dependence",
row=None,
col=None,
)
elif mode == "one-way":
if is_categorical:
trace = _go.Bar(
x=part_dep["feature_values"],
y=part_dep["partial_dependence"],
name="Partial Dependence",
)
else:
trace = _go.Scatter(
x=part_dep["feature_values"],
y=part_dep["partial_dependence"],
name="Partial Dependence",
line=dict(width=3, color="rgb(99,110,250)"),
)
fig.add_trace(trace)
return fig
def _add_ice_plot(_go, fig, ice_data, label=None, row=None, col=None):
x = ice_data["feature_values"]
y = ice_data
if "class_label" in ice_data.columns:
if label:
y = y[y["class_label"] == label]
y.drop(columns=["class_label"], inplace=True)
y = y.drop(columns=["feature_values"])
for i, sample in enumerate(y):
fig.add_trace(
_go.Scatter(
x=x,
y=y[sample],
line=dict(width=0.5, color="gray"),
name=f"Individual Conditional Expectation{': ' + label if label else ''}",
legendgroup="ICE" + label if label else "ICE",
showlegend=True if i == 0 else False,
),
row=row,
col=col,
)
return fig
def _calculate_axis_range(arr):
"""Helper method to help calculate the appropriate range for an axis based on the data to graph."""
max_value = arr.max()
min_value = arr.min()
margins = abs(max_value - min_value) * 0.05
return [min_value - margins, max_value + margins]
def get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold=None):
"""Combines y_true and y_pred into a single dataframe and adds a column for outliers. Used in `graph_prediction_vs_actual()`.
Arguments:
y_true (pd.Series, or np.ndarray): The real target values of the data
y_pred (pd.Series, or np.ndarray): The predicted values outputted by the regression model.
outlier_threshold (int, float): A positive threshold for what is considered an outlier value. This value is compared to the absolute difference
between each value of y_true and y_pred. Values within this threshold will be blue, otherwise they will be yellow.
Defaults to None
Returns:
pd.DataFrame with the following columns:
* `prediction`: Predicted values from regression model.
* `actual`: Real target values.
* `outlier`: Colors indicating which values are in the threshold for what is considered an outlier value.
"""
if outlier_threshold and outlier_threshold <= 0:
raise ValueError(
f"Threshold must be positive! Provided threshold is {outlier_threshold}"
)
y_true = infer_feature_types(y_true)
y_pred = infer_feature_types(y_pred)
predictions = y_pred.reset_index(drop=True)
actual = y_true.reset_index(drop=True)
    data = pd.concat([pd.Series(predictions), pd.Series(actual)], axis=1)
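    # NOTE: the remainder of this function was truncated in the source. The lines below are a
    # hedged reconstruction based on the docstring above (columns `prediction`, `actual`,
    # `outlier`); the two hex colors are assumptions, not taken from the original code.
    data.columns = ["prediction", "actual"]
    if outlier_threshold:
        data["outlier"] = (
            (data["prediction"] - data["actual"])
            .abs()
            .gt(outlier_threshold)
            .map({True: "#ffff00", False: "#0000ff"})
        )
    else:
        data["outlier"] = "#0000ff"
    return data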
## Online battery validation
import os
import glob
import pandas as pd
import numpy as np
import pickle
class BESS(object):
def __init__(self, max_energy, max_power, init_soc_proc, efficiency):
self.soc = init_soc_proc
self.max_e_capacity = max_energy
self.efficiency = efficiency
self.energy = self.max_e_capacity * (self.soc)/100
self.power = max_power
def calculate_NLF(self, net_load_day):
""" Net load factor
"""
df = pd.DataFrame(net_load_day).abs()
NLF = df.mean()/df.max()
return NLF[0]
def calculate_SBSPM(self, NR, LE, UE, error=0.01):
"""
Calculates second by second Service Performance Measure (SBSPM)
"""
if (NR >= LE - error) and (NR <= UE + error):
SBSPM = 1
elif (NR > UE + error):
SBSPM = max([1-abs(NR - UE), 0])
elif (NR < LE - error):
SBSPM = max([1-abs(NR - LE), 0])
else:
raise ValueError('The NR is undefined {}'.format(NR))
return SBSPM
def average_SPM_over_SP(self, SBSPM_list):
"""
Averages SPM over Settlement period
"""
SPM = sum(SBSPM_list)/1800
return SPM
def check_availability(self, SPM):
"""
Returns availability factor
"""
if SPM >= 0.95:
availability_factor = 1
elif (SPM >= 0.75) and (SPM < 0.95):
availability_factor = 0.75
elif (SPM >= 0.5) and (SPM < 0.75):
availability_factor = 0.5
elif (SPM < 0.5):
availability_factor = 0
return availability_factor
def save_to_pickle(name, list_to_save, save_path):
with open(os.path.join(save_path, '{}.pkl'.format(name)), 'wb') as f:
pickle.dump(list_to_save, f)
return
def load_from_pickle(name, save_path):
with open(os.path.join(save_path, '{}.pkl'.format(name)), 'rb') as f:
p = pickle.load(f)
return p
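# Illustrative sketch (not part of the original script): how the BESS service-performance
# methods above fit together for a single 30-minute settlement period. The sizing values and
# the constant response/envelope triple below are hypothetical.
def _demo_spm_for_one_settlement_period():
    bess = BESS(max_energy=7.5, max_power=3.3, init_soc_proc=50, efficiency=0.93)
    # one normalised (NR, LE, UE) evaluation per second of the settlement period
    sbspm_values = [bess.calculate_SBSPM(NR=0.5, LE=0.4, UE=0.6) for _ in range(1800)]
    spm = bess.average_SPM_over_SP(sbspm_values)   # averages the 1800 SBSPM values
    return bess.check_availability(spm)            # maps the SPM onto an availability factor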
import os
import pandas as pd
path = "."
bess_name = "sonnen"
apath = os.path.join(path, 'simulations_{}'.format(bess_name), '{}'.format(1),'agent_{}.csv'.format(1))
nl = pd.read_csv(apath).loc[:,['nl_a{}'.format(1)]]
pb = pd.read_csv(apath).loc[:,['pb_a{}'.format(1)]]
c_reg = pd.read_csv(apath).loc[:,['c_reg_a{}'.format(1)]]
#flex_down = pd.read_csv(apath).loc[:,['flex_down_a{}'.format(1)]]
#flex_up = pd.read_csv(apath).loc[:,['flex_up_a{}'.format(1)]]
apath = os.path.join(path, 'forecasts', bess_name, 'min_power_forecast.csv')
min_efr_forecast = pd.read_csv(apath, sep=",")
min_efr_forecast[min_efr_forecast<0] = 0
apath = os.path.join(path, 'forecasts', bess_name, 'max_power_forecast.csv')
max_efr_forecast = pd.read_csv(apath, sep=",")
max_efr_forecast[max_efr_forecast<0] = 0
net_load_path = '.'
apath = os.path.join(net_load_path,'whole home power import 1 min 12 months averaged.csv')
net_load_true = pd.read_csv(apath, index_col=['timestamp'], parse_dates=True)
net_load_true = net_load_true[net_load_true.iloc[0:100*60*24+1,:].index[-1]:]
# project_address = r"C:\Users\h17353\PycharmProjects\Frequency\frequency_measurements\GB"
if bess_name == 'sonnen':
apath = os.path.join(net_load_path, 'GB-EFR-1-s-2019-sonnenEco75.csv')
elif bess_name == 'tesla':
apath = os.path.join(net_load_path, 'GB-EFR-1-s-2019-tesla135.csv')
efr_true = pd.read_csv(apath, sep=",", parse_dates=['dtm'],infer_datetime_format=True,
na_values=['nan', '?'], index_col='dtm', dayfirst=True)
efr_true = efr_true['2019-04-11 00:00:00':]
priority = 'schedule' #'efr' #
dataset_folder = '.'
sim_path = "."
save_path = "./save_results_09998"
len_betas = 1
len_agents = 150
len_days = 150
HH=48
M=30
T=60
## Initialize list of dataframes
dfs_SPM = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_avail_fact = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_bat_energy = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_bat_imb = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_nl_imb = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_frcst_imb = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_SPM_std = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_avail_fact_std = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_bat_energy_std = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_bat_imb_std = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_nl_imb_std = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_frcst_imb_std = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
df_NLF_agg_real = pd.DataFrame(columns=range(1,len_days+1,1))
df_NLF_agg_frcst = pd.DataFrame(columns=range(1,len_days+1,1))
dfs_real_NLF = pd.DataFrame(index=range(1,len_days+1,1), columns=range(len_agents))
dfs_frcst_NLF = pd.DataFrame(index=range(1,len_days+1,1), columns=range(len_agents))
df_agg_imb = pd.DataFrame(index=range(HH), columns=range(1,len_days+1,1))
dfs_daily_nl_imb = pd.DataFrame(index=range(1,len_days+1,1), columns=range(len_agents))
dfs_daily_bat_imb = pd.DataFrame(index=range(1,len_days+1,1), columns=range(len_agents))
dfs_daily_frcst_imb = pd.DataFrame(index=range(1,len_days+1,1), columns=range(len_agents))
for day in range(1,len_days+1,1):
print('day - {}'.format(day))
## Initialize daily profiles for all agents (EFR)
efr_true_day = efr_true.iloc[(24*60*60)*(day-1):(24*60*60)*day,]
## Initialize daily profiles per agent
net_load_true_day = net_load_true.iloc[(24*60)*(day-1):(24*60)*day,]
## Create dataframes to save Metric(agents)
df_SPM = pd.DataFrame(index=range(HH), columns=range(len_agents))
df_avail_fact = pd.DataFrame(index=range(HH), columns=range(len_agents))
df_frcst_imbs = pd.DataFrame(index=range(HH), columns=range(len_agents))
df_bat_energy = pd.DataFrame(index=range(HH), columns=range(len_agents))
df_nl_imb = pd.DataFrame(index=range(HH), columns=range(len_agents))
df_bat_imb = pd.DataFrame(index=range(HH), columns=range(len_agents))
for beta in range(len_betas):
print('beta - {}'.format(beta))
## Getting average int index of selected plans for all agents
beta_i = 0.9998
path = os.path.join(dataset_folder, 'run_beta_day{}'.format(day),'output-{}-beta-{}'.format(day, beta_i))
file_loc = glob.glob(os.path.join(path, next(os.walk(path))[1][0],'*selected-plans.csv'), recursive=True)
file_cost = glob.glob(os.path.join(path, next(os.walk(path))[1][0],'*global-cost.csv'), recursive=True)
#file_termin = glob.glob(os.path.join(path, next(os.walk(path))[1][0],'*termination.csv'), recursive=True)
#termin = pd.read_csv(file_termin[0]).iloc[:,1].max()
run = pd.read_csv(file_cost[0]).groupby(['Iteration']).min().iloc[-1,2:].idxmin().split('-')[1]
df = pd.read_csv(file_loc[0])
list_selected = df.loc[(df['Run']==int(run))&(df['Iteration']==df['Iteration'].max())].iloc[0,2:].values
#df = pd.read_csv(file_loc[0]).groupby(['Iteration']).mean().drop('Run', axis=1)
#list_selected = df.iloc[(df.index[-1]),:].astype(int).values #termin
agg_nl_real = pd.Series(data=np.zeros(48))
agg_nl_frcst = pd.Series(data=np.zeros(48))
for agent in range(len_agents):
#print('agent - {}'.format(agent))
## Initialize the battery instance
if bess_name == 'sonnen':
bess = BESS(0.9*7.5, 3.3, 50, 0.93)
elif bess_name == 'tesla':
bess = BESS(13.5, 3.68, 50, 0.95)
else:
raise ValueError('Bess name is not given')
## Extract plan id related net load, battery schedule, and EFR capacity for the agent
plan_id = list_selected[agent]
#print('Plan id {}'.format(plan_id))
apath = os.path.join(sim_path, 'simulations_{}'.format(bess_name), '{}'.format(day),'agent_{}.csv'.format(agent))
df_sim = pd.read_csv(apath)
nl = df_sim.loc[:,['nl_a{}'.format(plan_id+1)]]
pb = df_sim.loc[:,['pb_a{}'.format(plan_id+1)]]
c_reg = df_sim.loc[:,['c_reg_a{}'.format(plan_id+1)]]
min_efr_forecast_day = min_efr_forecast.iloc[(day-1)*(48):(day)*(48),[plan_id]]
max_efr_forecast_day = max_efr_forecast.iloc[(day-1)*(48):(day)*(48),[plan_id]]
## Initialize the metrics
net_load_day = []
ava_factors_day = []
frcst_imbs_day = []
SPMs = []
bat_imbs = []
bat_energy_day = []
## Iterate through half hourly intervals
for hh in range(HH):
#print('-------- {} ----------'.format(hh))
net_load_hh = []
spm_hh = []
frcst_imbs_hh = []
bat_imbs_hh = []
bat_energy_hh = []
## Obtain scheduled battery power and minmax response powers
Pbat_sch = pb.iloc[hh, 0]
P_down = min_efr_forecast_day.iloc[hh, 0]
P_up = max_efr_forecast_day.iloc[hh, 0]
C_reg = c_reg.iloc[hh, 0]
for m in range(M):
## Get minute-based net power
P_real = net_load_true_day.iloc[(hh*30)+m, agent]
for t in range(T):
## Check the schedule limits violation
P_bat_resp_req = efr_true_day.iloc[((hh*30)+m)*60+t,0]*C_reg ## 0 - 'power'
LE = efr_true_day.iloc[((hh*30)+m)*60+t,1]*C_reg ## 1 - 'LE'
UE = efr_true_day.iloc[((hh*30)+m)*60+t,2]*C_reg ## 2 - 'UE'
#print('LE {} - P_bat_resp_req {} - UE {}: (C_reg) {}'.format(LE, P_bat_resp_req, UE, C_reg))
## down-regulation
if P_bat_resp_req < 0:
P_bat_resp = max([P_bat_resp_req, -P_down*C_reg])
efr_frcst_imb = P_bat_resp_req - P_bat_resp ## neg imbalance
## up-regulation
elif P_bat_resp_req >= 0:
P_bat_resp = min([P_bat_resp_req, P_up*C_reg])
efr_frcst_imb = P_bat_resp_req - P_bat_resp ## pos imbalance
else:
raise ValueError('Error in P_bat_resp')
## normalize
if C_reg!= 0:
efr_frcst_imb = efr_frcst_imb/(bess.power*C_reg)
## Forecast imbalance
#print('P_down; P_bat_resp_req; P_up; efr_frcst_imb; (C_reg)')
#print('{}; {}; {}; {}; {}'.format(-P_down, P_bat_resp_req, P_up, efr_frcst_imb, C_reg))
frcst_imbs_hh.append(efr_frcst_imb)
## Check the energy limits violation
Pbat = Pbat_sch + P_bat_resp_req
Pbat_ch = 0.
Pbat_dc = 0.
## down-regulation
if Pbat < 0.:
Pbat_ch = max([Pbat, -bess.power, (bess.energy-bess.max_e_capacity)/(bess.efficiency*(1./3600.))])
power_imb = Pbat - Pbat_ch ## neg or zero
if power_imb < 0.:
if P_bat_resp_req < 0.:
if priority=='schedule':
## priority of battery schedule
if (Pbat_sch-Pbat_ch) <= 0.:
NR = 0.
## Pbat_sch = Pbat_ch
else:
NR = max([P_bat_resp_req, Pbat_ch - Pbat_sch])
## Pbat_sch = Pbat_sch
elif priority=='efr':
## priority of efr
if (P_bat_resp_req-Pbat_ch) <= 0.:
NR = Pbat_ch
## Pbat_sch = 0
else:
NR = P_bat_resp_req
## Pbat_sch = Pbat_ch - P_bat_resp_req
else:
raise ValueError('Priority is not given')
elif P_bat_resp_req >= 0.:
NR = P_bat_resp_req
## Pbat_sch = Pbat_sch - power_imb
elif power_imb == 0.:
NR = P_bat_resp_req
else:
raise ValueError('Error in power_imb')
elif Pbat >= 0.:
Pbat_dc = min([Pbat, bess.power, bess.efficiency*(bess.energy-0)/(1./3600.)])
power_imb = Pbat - Pbat_dc ## pos or zero
if power_imb > 0.:
if P_bat_resp_req > 0.:
if priority=='schedule':
## priority of battery schedule
if (Pbat_sch-Pbat_dc)>=0.:
NR = 0.
## Pbat_sch = Pbat_ch
else:
NR = min([P_bat_resp_req, Pbat_dc - Pbat_sch])
## Pbat_sch = Pbat_sch
elif priority=='efr':
## priority of efr
if (P_bat_resp_req-Pbat_dc) >= 0.:
NR = Pbat_dc
## Pbat_sch = 0
else:
NR = P_bat_resp_req
## Pbat_sch = Pbat_dc - P_bat_resp_req
else:
raise ValueError('Priority is not given')
elif P_bat_resp_req <= 0:
NR = P_bat_resp_req
## Pbat_sch = Pbat_sch - power_imb
elif power_imb == 0:
NR = P_bat_resp_req
else:
raise ValueError('Error in power_imb')
else:
raise ValueError('Error in Pbat')
#print('Pbat_sch {} P_bat_resp_req {} NR {} power_imb {} bess_energy {}'.format(Pbat_sch,P_bat_resp_req,
# NR, power_imb, bess.energy))
bat_imbs_hh.append(power_imb)
## Change the energy state
bess.energy += -(Pbat_ch*bess.efficiency + Pbat_dc/bess.efficiency) * (1.0/3600.0)
Pb_real = Pbat_ch + Pbat_dc
P_nl = P_real - Pb_real
#print('P_real - Pb_real = P_nl')
#print('{} - {} = {}'.format(P_real,Pb_real,P_nl))
net_load_hh.append(P_nl)
if C_reg!= 0:
NR = NR/(bess.power*C_reg)
LE = LE/(bess.power*C_reg)
UE = UE/(bess.power*C_reg)
SBSPM = bess.calculate_SBSPM(NR, LE, UE, error=0.01)
spm_hh.append(SBSPM)
bat_energy_hh.append(bess.energy)
bat_energy_day.append(pd.Series(bat_energy_hh).mean())
net_load_day.append(pd.Series(net_load_hh).mean())
bat_imbs.append(pd.Series(bat_imbs_hh).mean())
SPM = bess.average_SPM_over_SP(spm_hh)
SPMs.append(SPM)
ava_factors_day.append(bess.check_availability(SPM))
                frcst_imbs_day.append(pd.Series(frcst_imbs_hh).mean())
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn import metrics
import numpy as np
from config import *
def sigmoid(x, a=60, b=30):
return 1.0 / (1 + np.exp(-a * x + b))
def split_map(x, a=0.3, b=0.7):
return 0. if x < a else 1. if x > b else x
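# Worked examples (illustrative): sigmoid(0.5) = 1 / (1 + exp(-60*0.5 + 30)) = 0.5;
# split_map(0.2) -> 0.0, split_map(0.5) -> 0.5, split_map(0.9) -> 1.0.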
def extract_features(infile, degree=0.95):
df = pd.read_csv(infile)
obj = df.to_dict()
fscores = obj['fscore']
tot = sum(fscores.values())
res = list()
acc = 0
    for k in list(fscores.keys())[::-1]:
acc += fscores[k]
if float(acc) / float(tot) > degree:
break
res.append(obj['feature'][k])
print(len(res))
print(res)
def calc_auc(df):
y_true = df['Label'].values
y_pred = df[probability_consumed_label].values
auc = metrics.roc_auc_score(np.array(y_true), np.array(y_pred))
    return pd.DataFrame({coupon_label: [df[coupon_label][0]], 'auc': [auc]})
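# Illustrative usage sketch (not part of the original script): calc_auc is written to be
# applied once per coupon group. `coupon_label` comes from config.py; `validation_df` is a
# hypothetical frame holding 'Label', the coupon column and the predicted probabilities.
def mean_auc_per_coupon(validation_df):
    per_coupon = pd.concat(
        calc_auc(group.reset_index(drop=True))
        for _, group in validation_df.groupby(coupon_label)
    )
    return per_coupon['auc'].mean()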
import os
import unittest
import warnings
from collections import defaultdict
from unittest import mock
import numpy as np
import pandas as pd
import six
from dataprofiler.profilers import TextColumn, utils
from dataprofiler.profilers.profiler_options import TextOptions
from dataprofiler.tests.profilers import utils as test_utils
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestTextColumnProfiler(unittest.TestCase):
def setUp(self):
test_utils.set_seed(seed=0)
def test_profiled_vocab(self):
"""
Checks whether the vocab list for the profiler is correct.
:return:
"""
df1 = pd.Series([
"abcd", "aa", "abcd", "aa", "b", "4", "3", "2", "dfd", "2",
]).apply(str)
df2 = pd.Series(["1", "1", "ee", "ff", "ff", "gg",
"gg", "abcd", "aa", "b", "ee", "b"]).apply(str)
df3 = pd.Series([
"NaN", "b", "nan", "c",
]).apply(str)
text_profiler = TextColumn(df1.name)
text_profiler.update(df1)
unique_vocab = dict.fromkeys(''.join(df1.tolist())).keys()
six.assertCountEqual(self, unique_vocab, text_profiler.vocab)
six.assertCountEqual(
self, set(text_profiler.vocab), text_profiler.vocab)
text_profiler.update(df2)
df = pd.concat([df1, df2])
unique_vocab = dict.fromkeys(''.join(df.tolist())).keys()
six.assertCountEqual(self, unique_vocab, text_profiler.vocab)
six.assertCountEqual(
self, set(text_profiler.vocab), text_profiler.vocab)
text_profiler.update(df3)
df = pd.concat([df1, df2, df3])
unique_vocab = dict.fromkeys(''.join(df.tolist())).keys()
six.assertCountEqual(self, unique_vocab, text_profiler.vocab)
def test_profiled_str_numerics(self):
"""
Checks whether the vocab list for the profiler is correct.
:return:
"""
def mean(df):
total = 0
for item in df:
total += item
return total / len(df)
def var(df):
var = 0
mean_df = mean(df)
for item in df:
var += (item - mean_df) ** 2
return var / (len(df) - 1)
def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):
delta = mean_b - mean_a
m_a = var_a * (count_a - 1)
m_b = var_b * (count_b - 1)
M2 = m_a + m_b + delta ** 2 * count_a * count_b / (
count_a + count_b)
return M2 / (count_a + count_b - 1)
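        # batch_variance combines two per-batch sample variances into the pooled sample
        # variance (Chan et al.'s parallel update), so the expected streaming variance can be
        # computed without concatenating the raw samples.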
df1 = pd.Series([
"abcd", "aa", "abcd", "aa", "b", "4", "3", "2", "dfd", "2", np.nan,
]).apply(str)
df2 = pd.Series(["1", "1", "ee", "ff", "ff", "gg",
"gg", "abcd", "aa", "b", "ee", "b"]).apply(str)
df3 = pd.Series([
"NaN", "b", "nan", "c", None,
]).apply(str)
text_profiler = TextColumn(df1.name)
text_profiler.update(df1)
self.assertEqual(mean(df1.str.len()), text_profiler.mean)
self.assertAlmostEqual(var(df1.str.len()), text_profiler.variance)
self.assertAlmostEqual(
np.sqrt(var(df1.str.len())), text_profiler.stddev)
variance = batch_variance(
mean_a=text_profiler.mean,
var_a=text_profiler.variance,
count_a=text_profiler.sample_size,
mean_b=mean(df2.str.len()),
var_b=var(df2.str.len()),
count_b=df2.count()
)
text_profiler.update(df2)
df = pd.concat([df1, df2])
self.assertEqual(df.str.len().mean(), text_profiler.mean)
self.assertAlmostEqual(variance, text_profiler.variance)
self.assertAlmostEqual(np.sqrt(variance), text_profiler.stddev)
variance = batch_variance(
mean_a=text_profiler.mean,
var_a=text_profiler.variance,
count_a=text_profiler.match_count,
mean_b=mean(df3.str.len()),
var_b=var(df3.str.len()),
count_b=df3.count()
)
text_profiler.update(df3)
        df = pd.concat([df1, df2, df3])
import abc
import os
import numpy as np
import pandas as pd
from odin.utils import get_root_logger
from odin.utils.draw_utils import make_multi_category_plot, display_sensitivity_impact_plot, \
plot_categories_curve, plot_class_distribution
logger = get_root_logger()
class AnalyzerInterface(metaclass=abc.ABCMeta):
__detector_name = "detector"
result_saving_path = "./results/"
dataset = None
__valid_metrics = None
__valid_curves = None
metric = None
# ONLY FOR TESTING, TO REMOVE
use_new_normalization = True # if True use the new implementation of normalization (categories + properties),
# otherwise use the old one (only categories)
_use_normalization = False
_norm_factors = None
normalizer_factor = 1
conf_thresh = 0.5
saved_results = {}
fp_errors = None
__SAVE_PNG_GRAPHS = True
__is_binary = False
def __init__(self, detector_name, dataset, result_saving_path, use_normalization, norm_factor_categories,
norm_factors_properties, conf_thresh, metric, valid_metrics, valid_curves, is_binary,
save_graphs_as_png):
self.__detector_name = detector_name
self.dataset = dataset
if not os.path.exists(result_saving_path):
os.mkdir(result_saving_path)
self.result_saving_path = os.path.join(result_saving_path, detector_name)
if not os.path.exists(self.result_saving_path):
os.mkdir(self.result_saving_path)
self._use_normalization = use_normalization
self._norm_factors = self.__create_norm_factors_dict(norm_factor_categories, norm_factors_properties)
self.conf_thresh = conf_thresh
self.metric = metric
self.__valid_metrics = valid_metrics
self.__valid_curves = valid_curves
self.__is_binary = is_binary
self.__SAVE_PNG_GRAPHS = save_graphs_as_png
def analyze_property(self, property_name, possible_values=None, labels=None, show=True, metric=None):
"""Analyzes the performances of the model for each category considering only the ground truth having a certain
property value.
Parameters
----------
property_name: str
Name of the property to analyze
possible_values: list, optional
Property values to be analyzed. If None consider all the possible values of the property. (default is None)
labels: list, optional
Property values names to show in the graph. If None use the display name in the properties file.
(default is None)
show: bool, optional
If True results are shown in a graph. (default is True)
metric: str, optional
Metric used for the analysis. If None use the default metrics. (default is None)
"""
if property_name not in self.dataset.get_property_keys():
logger.error(f"Property '{property_name}' not valid")
return
if possible_values is None or not possible_values:
possible_values = self.dataset.get_values_for_property(property_name)
else:
if not self._is_valid_property(property_name, possible_values):
return
if labels is None:
labels = []
for p in possible_values:
display_name = self.dataset.get_display_name_of_property_value(property_name, p)
if display_name is None:
labels.append(p)
else:
labels.append(display_name)
elif len(possible_values) != len(labels):
logger.error("Inconsistency between number of possible values and labels.")
return
if metric is None:
metric = self.metric
elif not self._is_valid_metric(metric):
return
if metric not in self.saved_results.keys():
self.saved_results[metric] = {}
if self.__is_binary:
categories = [self.dataset.get_categories_names()[0]]
else:
categories = self.dataset.get_categories_names()
for category in categories:
category_id = self.dataset.get_category_id_from_name(category)
if category not in self.saved_results[metric].keys():
self.saved_results[metric][category] = {}
self.saved_results[metric][category]['all'] = self._calculate_metric_for_category(category,
metric=metric)
matching = self.saved_results[metric][category]['all']["matching"]
self.saved_results[metric][category][property_name] = self._calculate_metric_for_properties_of_category(
category, category_id, property_name, possible_values, matching, metric=metric)
title = "Analysis of {} property".format(property_name)
if show:
make_multi_category_plot(self.saved_results[metric], property_name, labels, title, metric,
self.__SAVE_PNG_GRAPHS, self.result_saving_path)
def analyze_properties(self, properties=None, metric=None):
"""Analyzes the performances of the model for each category considering only the ground truth having a certain
property value. The analysis is performed for all the properties specified in the parameters.
Parameters
----------
properties: list of str, optional
Names of the properties to analyze. If None perform the analysis for all the properties. (default is None)
metric: str
Metric used for the analysis. If None use the default metrics. (default is None)
"""
if properties is None:
properties = self.dataset.get_property_keys()
else:
if not self._are_valid_properties(properties):
return
if metric is None:
metric = self.metric
elif not self._is_valid_metric(metric):
return
for pkey in properties:
values = self.dataset.get_values_for_property(pkey)
self.analyze_property(pkey, values, metric=metric)
def show_distribution_of_properties(self, properties=None):
"""Shows the distribution of the property among its different values and for each property value shows the
distribution among the categories.
Parameters
----------
properties: list of str, optional
Names of the properties to analyze the distribution. If None perform the analysis for all the properties.
(default is None)
"""
if properties is None:
properties = self.dataset.get_property_keys()
elif not self._are_valid_properties(properties):
return
for property in properties:
self.show_distribution_of_property(property)
def analyze_sensitivity_impact_of_properties(self, properties=None, metric=None):
"""Analyzes the sensitivity and the impact of the properties specified in the parameters.
Parameters
----------
properties: list of str, optional
Names of the properties to consider in the analysis. If None consider all the properties. (default is None)
metric: str
Metric used for the analysis. If None use the default metrics. (default is None)
"""
if properties is None:
properties = self.dataset.get_property_keys()
else:
if not self._are_valid_properties(properties):
return
display_names = [self.dataset.get_display_name_of_property(pkey) for pkey in properties]
if metric is None:
metric = self.metric
elif not self._is_valid_metric(metric):
return
for pkey in properties:
values = self.dataset.get_values_for_property(pkey)
self.analyze_property(pkey, values, show=False, metric=metric)
display_sensitivity_impact_plot(self.saved_results[metric], self.result_saving_path, properties,
display_names, metric, self.__SAVE_PNG_GRAPHS)
def get_tp_distribution(self, categories=None):
if self.__is_binary:
logger.error("Not supported for binary classification")
return
if categories is None:
categories = self.dataset.get_categories_names()
elif not self._are_valid_categories(categories):
return
if categories is not None:
tp_classes = self._analyze_true_positive_for_categories(categories)
plot_class_distribution(tp_classes, self.result_saving_path, self.__SAVE_PNG_GRAPHS, "True Positive")
def get_fn_distribution(self, categories=None):
if self.__is_binary:
logger.error("Not supported for binary classification")
return
if categories is None:
categories = self.dataset.get_categories_names()
elif not self._are_valid_categories(categories):
return
if categories is not None:
tp_classes = self._analyze_false_negative_for_categories(categories)
plot_class_distribution(tp_classes, self.result_saving_path, self.__SAVE_PNG_GRAPHS, "False Negative")
@abc.abstractmethod
def _analyze_true_positive_for_categories(self, categories):
pass
@abc.abstractmethod
def _analyze_false_negative_for_categories(self, categories):
pass
def get_fp_error_distribution(self, categories=None):
if self.__is_binary:
logger.error("Not supported for binary classification")
return
if categories is None:
categories = self.dataset.get_categories_names()
elif not self._are_valid_categories(categories):
return
self.fp_errors = None
error_dict_total = self._analyze_false_positive_errors(categories)
plot_class_distribution(error_dict_total["distribution"], self.result_saving_path, self.__SAVE_PNG_GRAPHS,
"False Positive")
def analyze_false_positive_errors(self, categories=None, metric=None):
if self.__is_binary:
logger.error("Not supported for binary classification")
return
if categories is None:
categories = self.dataset.get_categories_names()
elif not self._are_valid_categories(categories):
return
if metric is None:
metric = self.metric
elif not self._is_valid_metric(metric):
return
if not self.__is_binary:
self.get_fp_error_distribution(categories)
for category in categories:
self.analyze_false_positive_error_for_category(category, categories=categories, metric=metric)
def analyze_curve_for_categories(self, categories=None, curve='precision_recall_curve'):
if self.__is_binary:
categories = [self.dataset.get_categories_names()[0]]
else:
if categories is None:
categories = self.dataset.get_categories_names()
elif not self._are_valid_categories(categories):
return
if not self.__is_valid_curve(curve):
return
results = self._compute_curve_for_categories(categories, curve)
plot_categories_curve(results, curve, self.__SAVE_PNG_GRAPHS, self.result_saving_path)
def set_normalization(self, use_normalization, with_properties=True, norm_factor_categories=None,
norm_factors_properties=None):
"""Sets the normalization for the metrics calculation
Parameters
----------
use_normalization: bool
Specifies whether or not to use normalization
with_properties: bool
Specifies whether or not to normalize also on properties values
norm_factor_categories: float, optional
Categories normalization factor (default is 1/number of categories)
norm_factors_properties: list of pairs, optional
Properties normalization factors.
Each pair specifies the normalization factor to apply to a specific property.
(Example: [(name1, value1), (name2, value2), ...]
"""
self._use_normalization = use_normalization
if with_properties:
self.use_new_normalization = True
else:
self.use_new_normalization = False
if norm_factor_categories is not None:
self._norm_factors["categories"] = norm_factor_categories
if norm_factors_properties is not None:
dataset_p_names = self.dataset.get_property_keys()
for p_name, p_value in norm_factors_properties:
if p_name in dataset_p_names:
self._norm_factors[p_name] = p_value
else:
logger.warn("Invalid property name in 'norm_factors_properties'.")
self.clear_saved_results()
self.fp_errors = None
def set_confidence_threshold(self, threshold):
"""Sets the threshold value. Predictions with a confidence lower than the threshold are ignored.
Parameters
----------
threshold: float
Threshold value. Must be between 0 and 1
"""
if threshold < 0 or threshold > 1:
logger.error("Invalid threshold value.")
return
self.conf_thresh = threshold
self.clear_saved_results()
self.fp_errors = None
def clear_saved_results(self, metrics=None):
if metrics is None:
self.saved_results = {}
else:
for m in metrics:
if m in self.saved_results.keys():
self.saved_results[m] = {}
else:
if self._is_valid_metric(m):
logger.warn(f"No data for metric {m}")
def _get_report_results(self, default_metrics, metrics, categories, properties, show_categories, show_properties):
if metrics is None:
metrics = default_metrics
else:
for m in metrics:
if m not in default_metrics and m != 'custom':
logger.error(
"Metric {} not supported for report. Available metrics: {}.".format(m, default_metrics))
return
if self.__is_binary:
show_categories = False
else:
if categories is None:
categories = self.dataset.get_categories_names()
elif not categories:
logger.warn("Empty categories list")
show_categories = False
else:
if not self._are_valid_categories(categories):
return
if properties is None:
properties = self.dataset.get_property_keys()
elif not properties:
logger.warn("Empty properties list")
show_properties = False
else:
if not self._are_valid_properties(properties):
return
input_report = self._get_input_report(properties, show_properties)
results = {}
types = {}
if self.__is_binary:
types = {"total": "Total"}
else:
types["avg macro"] = "Total"
types["avg micro"] = "Total"
if show_categories:
for cat in categories:
types[cat] = "Category"
if show_properties:
for prop in properties:
p_values = self.dataset.get_values_for_property(prop)
for p_value in p_values:
p_value = prop + "_" + "{}".format(p_value)
types[p_value] = "Property"
type_dict = {"type": types}
for metric in metrics:
results[metric] = self._calculate_report_for_metric(input_report, categories, properties, show_categories,
show_properties, metric)
        type_dataframe = pd.DataFrame(type_dict)
import argparse
import itertools
import hdbscan
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import pdist, squareform
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--methods", nargs="+", help="methods in the file")
parser.add_argument("--grid-search-total", help="TSV file with the grid search data")
parser.add_argument("--output", nargs=2, help="the path where the best thresholds will be saved.")
parser.add_argument("--output-hyperparameters", nargs=2, help="the path where the best parameters will be saved. ")
parser.add_argument("--output-figure-HDBSCAN", help="PNG with the results displayed graphically for HDBSCAN thresholds")
parser.add_argument("--output-figure-grid-search", help="PNG with the results displayed graphically for grid search")
args = parser.parse_args()
df = pd.read_csv(args.grid_search_total, sep="\t")
if args.output_figure_HDBSCAN:
#TODO: filter dataframe to best set of parameters for t-sne and umap
grouped_df = df.groupby(["method", "distance_threshold"])
maximums = grouped_df.max()
maximums = maximums.reset_index()
sns.relplot(data=maximums, x="distance_threshold", y="validation_mcc", col="method", kind="scatter")
plt.savefig(args.output_figure_HDBSCAN)
if args.output_figure_grid_search is not None:
sns.set_theme()
fig = plt.figure(figsize=(16, 8), constrained_layout=False)
gs = gridspec.GridSpec(2, 4, figure=fig, hspace=0.4, wspace=0.6)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[1, 0])
ax4 = fig.add_subplot(gs[1, 1])
# Creates two subplots and unpacks the output array immediately
sns.scatterplot(x='learning_rate', y='training_mcc', data=df, hue='perplexity', palette="Set1", ax=ax1)
ax1.set_xlabel("Learning Rate")
ax1.set_ylabel("MCC")
ax1.set_title('TSNE')
sns.scatterplot(x='perplexity', y='training_mcc', data=df, hue='learning_rate', palette="Set1", ax=ax2)
ax2.set_xlabel("Perplexity")
ax2.set_ylabel("MCC")
ax2.set_title('TSNE')
sns.scatterplot(x='n_neighbors', y='training_mcc', data=df, hue='min_dist', palette="Set1", ax=ax3)
ax3.set_xlabel("N Neighbors")
ax3.set_ylabel("MCC")
ax3.set_title("UMAP")
sns.scatterplot(x='min_dist', y='training_mcc', data=df, hue='n_neighbors', palette="Set1", ax=ax4)
ax4.set_xlabel("Minimum Distance")
ax4.set_ylabel("MCC")
ax4.set_title("UMAP")
ax1.set_ylim(0,1)
ax2.set_ylim(0,1)
ax3.set_ylim(0,1)
ax4.set_ylim(0,1)
plt.savefig(args.output_figure_grid_search)
if args.output is not None:
#make this a dataframe
max_values = []
for method in args.methods:
method_dict = dict(df.groupby("method").get_group(method).iloc[df.groupby("method").get_group(method).groupby("distance_threshold")["validation_mcc"].mean().argmax()])
max_values.append(method_dict)
max_df = pd.DataFrame(max_values)
max_index = max_df["method"].values.tolist()
max_thresholds = max_df["distance_threshold"].values.tolist()
max_df.to_csv(args.output[0])
df_TSNE = df[df.method == 't-sne'].dropna(axis = 1)
df_UMAP = df[df.method == 'umap'].dropna(axis = 1)
TSNE_grouped = pd.DataFrame(df_TSNE.groupby(["perplexity", "learning_rate"])['training_mcc'].mean())
tsne_val = TSNE_grouped.iloc[TSNE_grouped["training_mcc"].argmax()]
UMAP_grouped = pd.DataFrame(df_UMAP.groupby(["n_neighbors", "min_dist"])['training_mcc'].mean())
umap_val = UMAP_grouped.iloc[UMAP_grouped["training_mcc"].argmax()]
file = open(args.output[1], "w")
file.write("tsne perplexity: " + str(tsne_val.name[0]) + "\n" + "tsne learning_rate: " + str(tsne_val.name[1]) + "\n" + "mcc best value: " + str(tsne_val.values[0]) + "\n")
file.write("umap nearest_neighbors: " + str(umap_val.name[0]) + "\n" + "umap min_dist: " + str(umap_val.name[1]) + "\n" + "mcc best value: " + str(umap_val.values[0]))
file.write("\n".join([str(max_index[i]) + " best threshold is " + str(max_thresholds[i]) + "\n" for i in range(0,len(max_thresholds))]))
file.close()
if args.output_hyperparameters is not None:
max_values = []
for method in args.methods:
method_dict = dict(df.groupby("method").get_group(method).iloc[df.groupby("method").get_group(method).groupby("distance_threshold")["validation_mcc"].mean().argmax()])
max_values.append(method_dict)
        max_df = pd.DataFrame(max_values)
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 15 14:54:22 2021
@author: 10979
"""
from Bio import SeqIO
from Bio import Seq
import regex as re
import pandas as pd
import numpy as np
def lncRNA_features(fasta):
records = SeqIO.parse(fasta, 'fasta')
orf_length = []
orf_count = []
orf_position = []
ID = []
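    # Scan all six reading frames (both strands x 3 frame offsets), translate each frame and
    # collect candidate ORFs; duplicates per transcript are resolved below by sorting and
    # keeping the longest ORF per record ID.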
for record in records:
for strand, seq in (1, record.seq), (-1, record.seq.reverse_complement()):
for frame in range(3):
length = 3 * ((len(seq)-frame) // 3)
for pro in seq[frame:frame+length].translate(table = 1).split("*")[:-1]:
if 'M' in pro:
orf = pro[pro.find('M'):]
pos = seq[frame:frame+length].translate(table=1).find(orf)*3 + frame +1
orf_length.append(len(orf)*3+3)
orf_count.append(frame)
orf_position.append(pos)
ID.append(record.id)
else:
orf_length.append(0)
orf_count.append(0)
orf_position.append(0)
ID.append(record.id)
data = {
'ID':ID,
'orf_length':orf_length,
'orf_count':orf_count,
'orf_position':orf_position}
df = pd.DataFrame(data)
df.sort_values(by=["ID","orf_length"],ascending=[False,False],inplace=True)
df.duplicated(['ID'])
df2=df.drop_duplicates(['ID'])
ORF = df2
print('ORF features over.')
transcript_length = []
start_codon_number = []
end_codon_number = []
GC_pro = []
ID=[]
for record in SeqIO.parse(fasta, 'fasta'):
ID.append(record.id)
record = record.upper()
transcript_length.append(len(record))
start_codon_number.append(record.seq.count('ATG'))
end_codon_number.append(record.seq.count('TAG')+record.seq.count('TAA')+record.seq.count('TGA'))
GC_pro.append(100*float(record.seq.count('G')+record.seq.count('C'))/len(record))
data = {
'ID':ID,
'transcript_length':transcript_length,
'start_codon_number':start_codon_number,
'end_codon_number':end_codon_number,
'GC_pro':GC_pro
}
    df = pd.DataFrame(data)
from termcolor import colored
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
############### Show colored text #############
def bg(value, type='num', color='blue'):
value = str('{:,}'.format(value)) if type == 'num' else str(value)
return colored(' '+value+' ', color, attrs=['reverse', 'blink'])
############ Print the variable name ##############
# Credits: https://stackoverflow.com/questions/18425225/getting-the-name-of-a-variable-as-a-string
import inspect
def var2str(var):
"""
Gets the name of var. Does it from the out most frame inner-wards.
:param var: variable to get name from.
:return: string
"""
for fi in reversed(inspect.stack()):
names = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]
if len(names) > 0:
return names[0]
############### Summary Table #####################
from scipy import stats
# Summary dataframe
def summary(df, sort_col=0):
    summary = pd.DataFrame({'dtypes': df.dtypes})
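    # NOTE: the rest of this helper was truncated in the source. The continuation below is a
    # plausible sketch only; the exact columns produced by the original function are unknown.
    summary = summary.reset_index().rename(columns={'index': 'Name'})
    summary['Missing'] = df.isnull().sum().values
    summary['Uniques'] = df.nunique().values
    return summary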
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.utils.validation import check_is_fitted
from ._grouped_utils import _split_groups_and_values
class GroupedTransformer(BaseEstimator, TransformerMixin):
"""
Construct a transformer per data group. Splits data by groups from single or multiple columns
and transforms remaining columns using the transformers corresponding to the groups.
:param transformer: the transformer to be applied per group
:param groups: the column(s) of the matrix/dataframe to select as a grouping parameter set. If None,
the transformer will be applied to the entire input without grouping
:param use_global_model: Whether or not to fall back to a general transformation in case a group
is not found during `.transform()`
"""
_check_kwargs = {"accept_large_sparse": False}
def __init__(self, transformer, groups, use_global_model=True):
self.transformer = transformer
self.groups = groups
self.use_global_model = use_global_model
def __fit_single_group(self, group, X, y=None):
try:
return clone(self.transformer).fit(X, y)
except Exception as e:
raise type(e)(f"Exception for group {group}: {e}")
def __fit_grouped_transformer(
self, X_group: pd.DataFrame, X_value: np.array, y=None
):
"""Fit a transformer to each group"""
# Make the groups based on the groups dataframe, use the indices on the values array
try:
group_indices = X_group.groupby(X_group.columns.tolist()).indices
except TypeError:
# This one is needed because of line #918 of sklearn/utils/estimator_checks
raise TypeError("argument must be a string, date or number")
if y is not None:
if isinstance(y, pd.Series):
y.index = X_group.index
grouped_transformers = {
# Fit a clone of the transformer to each group
group: self.__fit_single_group(group, X_value[indices, :], y[indices])
for group, indices in group_indices.items()
}
else:
grouped_transformers = {
group: self.__fit_single_group(group, X_value[indices, :])
for group, indices in group_indices.items()
}
return grouped_transformers
def __check_transformer(self):
if not hasattr(self.transformer, "transform"):
raise ValueError(
"The supplied transformer should have a 'transform' method"
)
def fit(self, X, y=None):
"""
Fit the transformers to the groups in X
:param X: Array-like with at least two columns, of which at least one corresponds to groups defined in init,
and the remaining columns represent the values to transform.
:param y: (Optional) target variable
"""
self.__check_transformer()
self.fallback_ = None
if self.groups is None:
self.transformers_ = clone(self.transformer).fit(X, y)
return self
X_group, X_value = _split_groups_and_values(
X, self.groups, **self._check_kwargs
)
self.transformers_ = self.__fit_grouped_transformer(X_group, X_value, y)
if self.use_global_model:
self.fallback_ = clone(self.transformer).fit(X_value)
return self
def __transform_single_group(self, group, X):
"""Transform a single group by getting its transformer from the fitted dict"""
# Keep track of the original index such that we can sort in __transform_groups
index = X.index
try:
group_transformer = self.transformers_[group]
except KeyError:
if self.fallback_:
group_transformer = self.fallback_
else:
raise ValueError(
f"Found new group {group} during transform with use_global_model = False"
)
return pd.DataFrame(group_transformer.transform(X)).set_index(index)
def __transform_groups(self, X_group: pd.DataFrame, X_value: np.array):
"""Transform all groups"""
# Reset indices such that they are the same in X_group (reset in __check_grouping_columns),
# this way we can track the order of the result
        X_value = pd.DataFrame(X_value)
# python 2/3 compatibility
from __future__ import division, print_function
import sys
import os.path
import numpy
import pandas
import copy
import difflib
import scipy
import collections
import json
# package imports
import rba
from .rba import RbaModel, ConstraintMatrix, Solver
from .rba_SimulationData import RBA_SimulationData
from .rba_SimulationParameters import RBA_SimulationParameters
from .rba_ModelStructure import RBA_ModelStructure
from .rba_Problem import RBA_Problem
from .rba_Matrix import RBA_Matrix
from .rba_LP import RBA_LP
from .rba_FBA import RBA_FBA
from .rba_LogBook import RBA_LogBook
class RBA_Session(object):
"""
Top level of the RBA API.
Attributes
----------
    xml_dir : str
        Path to the directory the model was loaded from.
    model : rba.RbaModel
        The underlying RBA model object.
    matrices : rba.ConstraintMatrix
        Computational (matrix) representation of the model's constraints.
    solver : rba.Solver
        Solver operating on the constraint matrix.
    Problem : rbatools.RBA_Problem
        Linear-programming problem derived from the model.
    Medium : dict
        Current medium composition (metabolite ID -> concentration).
    ModelStructure : rbatools.RBA_ModelStructure
        Static information on the model's components.
    Results : dict
        Recorded simulation results as pandas.DataFrames (created by recordResults).
    Parameters : dict
        Recorded simulation parameters as pandas.DataFrames (created by recordParameters).
    SimulationData : rbatools.RBA_SimulationData
        Simulation data object created by writeResults.
    SimulationParameters : rbatools.RBA_SimulationParameters
        Simulation parameters object created by writeResults.
Methods
----------
__init__(xml_dir)
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
rebuild_from_model()
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
reloadModel()
Reloads model from xml-files and then rebuild computational model-representation (matrix).
recordResults(runName)
        Records simulation output for further use
        and stores it in the 'Results' attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
recordParameters(runName)
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters' attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
clearResults()
        Removes all previously recorded results and deletes the 'Results' attribute.
clearParameters()
        Removes all previously recorded parameters and deletes the 'Parameters' attribute.
    writeResults(session_name='', digits=5)
Creates SimulationData and SimulationParameters objects from recordings ('Results'.'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
Access via attributes .SimulationData and SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
            Default: 5
session_name : str
Name of Simulation session.
Default: ''
returnExchangeFluxes()
        Returns a dictionary with the exchange-rates of boundary-metabolites.
        Returns
        -------
        Dictionary with exchange-keys and respective rates.
ConstraintSaturation(constraints=None)
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
        'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
        'RHS': The value of the problem's right-hand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
setMedium(changes)
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
            Values : New concentration(s)
setMu(Mu)
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
doSolve(runName='DontSave')
Solves problem to find solution.
Does the same as rbatools.RBA_Problem.solveLP().
Just has some automatic option for results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
findMaxGrowthRate(precision=0.0005, max=4, start_value=None, recording=False)
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
            Numeric precision with which the maximum is approximated.
            Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A close starting-value reduces the required number of iterations, for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
knockOut(gene)
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be gene-identifier, represented as ID or ProtoID of proteins in rbatools.protein_bloc.ProteinBlock.Elements class (depends on whether protein-isoforms are considered).
FeasibleRange(variables=None)
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
ParetoFront(variable_X, variable_Y, N=10, sign_VY='max')
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
and 'N' rows. Each row represents an interval on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
"""
def __init__(self, xml_dir):
"""
Creates RBA_Session object from files
Parameters
----------
xml_dir : str
Path to the directory where rba-model files are located.
"""
self.xml_dir = xml_dir
self.LogBook = RBA_LogBook('Controler')
if not hasattr(self, 'ModelStructure'):
if os.path.isfile(str(self.xml_dir+'/ModelStructure.json')):
self.ModelStructure = RBA_ModelStructure()
with open(str(self.xml_dir+'/ModelStructure.json'), 'r') as myfile:
data = myfile.read()
self.ModelStructure.fromJSON(inputString=data)
else:
self.build_ModelStructure()
self.model = RbaModel.from_xml(input_dir=xml_dir)
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.LogBook.addEntry('Model loaded from {}.'.format(self.xml_dir))
self.Problem = RBA_Problem(solver=self.solver)
medium = pandas.read_csv(xml_dir+'/medium.tsv', sep='\t')
self.Medium = dict(zip(list(medium.iloc[:, 0]), [float(i)
for i in list(medium.iloc[:, 1])]))
self.Mu = self.Problem.Mu
self.ExchangeMap = buildExchangeMap(self)
def build_ModelStructure(self):
self.ModelStructure = RBA_ModelStructure()
self.ModelStructure.fromFiles(xml_dir=self.xml_dir)
self.ModelStructure.exportJSON(path=self.xml_dir)
def addExchangeReactions(self):
"""
Adds explicit exchange-reactions of boundary-metabolites to RBA-problem, named R_EX_ followed by metabolite name (without M_ prefix).
"""
Mets_external = [m.id for m in self.model.metabolism.species if m.boundary_condition]
Mets_internal = [m.id for m in self.model.metabolism.species if not m.boundary_condition]
Reactions = [r.id for r in self.model.metabolism.reactions]
full_S = rba.core.metabolism.build_S(
Mets_external+Mets_internal, self.model.metabolism.reactions)
S_M_ext = full_S[:len(Mets_external), ].toarray()
col_indices_toremove = []
for i in range(S_M_ext.shape[1]):
s_col_uniques = list(set(list(S_M_ext[:, i])))
if len(s_col_uniques) == 1:
if s_col_uniques[0] == 0:
col_indices_toremove.append(i)
RemainingReactions = [i for i in Reactions if Reactions.index(
i) not in col_indices_toremove]
S_ext = numpy.delete(S_M_ext, col_indices_toremove, axis=1)
A = numpy.concatenate((S_ext, numpy.eye(len(Mets_external))), axis=1, out=None)
ColNames = RemainingReactions+[str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]
# print(str('R_EX_'+i.split('M_')[-1]))
LBs = list([self.Problem.LP.LB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[-10000]*len(Mets_external))
UBs = list([self.Problem.LP.UB[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[10000]*len(Mets_external))
b = [0]*len(Mets_external)
f = list([self.Problem.LP.f[self.Problem.LP.col_names.index(i)]
for i in RemainingReactions]+[0]*len(Mets_external))
ExchangeMatrix = RBA_Matrix()
ExchangeMatrix.A = scipy.sparse.coo_matrix(A)
ExchangeMatrix.b = numpy.array([0]*len(Mets_external))
ExchangeMatrix.f = numpy.array(f)
ExchangeMatrix.LB = numpy.array(LBs)
ExchangeMatrix.UB = numpy.array(UBs)
ExchangeMatrix.row_signs = ['E']*len(Mets_external)
ExchangeMatrix.row_names = Mets_external
ExchangeMatrix.col_names = ColNames
ExchangeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=ExchangeMatrix)
self.ExchangeReactionMap = dict(
zip(Mets_external, [str('R_EX_'+i.split('M_')[-1]) for i in Mets_external]))
def rebuild_from_model(self):
"""
Rebuilds computational model-representation (matrix) from own attribute "model" (rba.RbaModel-object).
"""
self.LogBook.addEntry('Model rebuilt.')
self.matrices = ConstraintMatrix(model=self.model)
self.solver = Solver(matrix=self.matrices)
self.Problem = RBA_Problem(solver=self.solver)
self.setMedium(changes=self.Medium)
def reloadModel(self):
"""
Reloads model from xml-files and then rebuild computational model-representation (matrix).
"""
self.LogBook.addEntry('Model reloaded from {}.'.format(self.xml_dir))
self.model = RbaModel.from_xml(input_dir=self.xml_dir)
self.rebuild_from_model()
def recordResults(self, runName):
"""
        Records simulation output for further use
        and stores it in the 'Results' attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Solution recorded under {}.'.format(runName))
if not hasattr(self, 'Results'):
self.Results = {'Reactions': pandas.DataFrame(index=list(self.ModelStructure.ReactionInfo.Elements.keys())),
'Enzymes': pandas.DataFrame(index=list(self.ModelStructure.EnzymeInfo.Elements.keys())),
'Processes': pandas.DataFrame(index=[self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery' for i in self.ModelStructure.ProcessInfo.Elements.keys()]),
'Proteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinMatrix['Proteins'])),
'ProtoProteins': pandas.DataFrame(index=list(self.ModelStructure.ProteinGeneMatrix['ProtoProteins'])),
'Constraints': pandas.DataFrame(index=self.Problem.LP.row_names),
'SolutionType': pandas.DataFrame(index=['SolutionType']),
'Mu': pandas.DataFrame(index=['Mu']),
'ObjectiveFunction': pandas.DataFrame(index=self.Problem.LP.col_names),
'ObjectiveValue': pandas.DataFrame(index=['ObjectiveValue']),
'ExchangeFluxes': pandas.DataFrame(index=list(self.ExchangeMap.keys()))}
Exchanges = self.returnExchangeFluxes()
for i in Exchanges.keys():
self.Results['ExchangeFluxes'].loc[i, runName] = Exchanges[i]
self.Results['Reactions'][runName] = self.Results['Reactions'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Reactions'].index)})
self.Results['Enzymes'][runName] = self.Results['Enzymes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Enzymes'].index)})
self.Results['Processes'][runName] = self.Results['Processes'].index.map(
{i: self.Problem.SolutionValues[i] for i in list(self.Results['Processes'].index)})
self.Results['Constraints'][runName] = self.Results['Constraints'].index.map(
{i: self.Problem.DualValues[i] for i in self.Problem.LP.row_names})
self.Results['Proteins'][runName] = self.Results['Proteins'].index.map(
ProteomeRecording(self, runName))
self.Results['ProtoProteins'][runName] = self.Results['ProtoProteins'].index.map(
ProtoProteomeRecording(self, runName, self.Results['Proteins']))
self.Results['SolutionType'][runName] = self.Problem.SolutionType
self.Results['Mu'][runName] = self.Problem.Mu
self.Results['ObjectiveFunction'][runName] = list(self.Problem.getObjective().values())
self.Results['ObjectiveValue'][runName] = self.Problem.ObjectiveValue
def recordParameters(self, runName):
"""
        Records simulation parameters (LP-coefficients etc.) for further use
        and stores them in the 'Parameters' attribute as pandas.DataFrames, in a dictionary with the respective run-name being a column in all DataFrames.
Parameters
----------
runName : str
Name of observation/condition.
Serves as ID for all Data, originating from these.
"""
self.LogBook.addEntry('Coefficients recorded under {}.'.format(runName))
EnzymeCapacities = self.get_parameter_values(
parameter_type='enzyme_efficiencies', species=None, output_format='dict')
ProcessCapacities = self.get_parameter_values(
parameter_type='machine_efficiencies', species=None, output_format='dict')
CompartmentCapacities = self.get_parameter_values(
parameter_type='maximal_densities', species=None, output_format='dict')
TargetValues = self.get_parameter_values(
parameter_type='target_values', species=None, output_format='dict')
if not hasattr(self, 'Parameters'):
self.Parameters = {'EnzymeEfficiencies_FW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'EnzymeEfficiencies_BW': pandas.DataFrame(index=list(EnzymeCapacities.keys())),
'ProcessEfficiencies': pandas.DataFrame(index=list(ProcessCapacities.keys())),
'CompartmentCapacities': pandas.DataFrame(index=list(CompartmentCapacities.keys())),
'Medium': pandas.DataFrame(index=self.Medium.keys()),
'TargetValues': pandas.DataFrame(index=[TargetValues[i]['Target_id'] for i in list(TargetValues.keys())])}
self.Parameters['EnzymeEfficiencies_FW'][runName] = self.Parameters['EnzymeEfficiencies_FW'].index.map({i: list(
EnzymeCapacities[i]['Forward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['EnzymeEfficiencies_BW'][runName] = self.Parameters['EnzymeEfficiencies_BW'].index.map({i: list(
EnzymeCapacities[i]['Backward'].values())[0] for i in list(EnzymeCapacities.keys()) if len(list(EnzymeCapacities[i]['Forward'].values())) > 0})
self.Parameters['ProcessEfficiencies'][runName] = self.Parameters['ProcessEfficiencies'].index.map(
{i: list(ProcessCapacities[i].values())[0] for i in list(ProcessCapacities.keys()) if len(list(ProcessCapacities[i].values())) > 0})
self.Parameters['CompartmentCapacities'][runName] = self.Parameters['CompartmentCapacities'].index.map(
{i: list(CompartmentCapacities[i].values())[0] for i in list(CompartmentCapacities.keys()) if len(list(CompartmentCapacities[i].values())) > 0})
self.Parameters['Medium'][runName] = self.Parameters['Medium'].index.map(self.Medium)
self.Parameters['TargetValues'][runName] = self.Parameters['TargetValues'].index.map(
{TargetValues[i]['Target_id']: list(TargetValues[i]['Target_value'].values())[0] for i in list(TargetValues.keys()) if len(list(TargetValues[i]['Target_value'].values())) > 0})
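    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical
    # instance of this class and the run-name is arbitrary):
    #   sim.doSolve(runName='condition_1')           # solve and record results under that name
    #   sim.recordParameters(runName='condition_1')  # record the matching parameter set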
def clearResults(self):
"""
        Removes all previously recorded results and deletes the 'Results'-attribute.
"""
self.LogBook.addEntry('Results cleared.')
delattr(self, 'Results')
def clearParameters(self):
"""
        Removes all previously recorded parameters and deletes the 'Parameters'-attribute.
"""
self.LogBook.addEntry('Parameters cleared.')
delattr(self, 'Parameters')
def writeResults(self, session_name='', digits=5, loggingIntermediateSteps=False):
"""
        Creates SimulationData and SimulationParameters objects from recordings ('Results', 'Parameters').
Stores them as rbatools.RBA_SimulationData
and rbatools.RBA_SimulationParameters objects as attributes.
        Access via attributes .SimulationData and .SimulationParameters respectively.
Parameters
----------
digits : int
Number of decimal places in the numeric results
            Default: 5
session_name : str
Name of Simulation session.
Default: ''
"""
self.LogBook.addEntry('Data written under {}.'.format(session_name))
if hasattr(self, 'Results'):
self.Results['uniqueReactions'] = mapIsoReactions(Controller=self)
self.Results['SolutionType'] = self.Results['SolutionType']
self.Results['Mu'] = self.Results['Mu'].round(digits)
self.Results['ObjectiveFunction'] = self.Results['ObjectiveFunction'].loc[(
self.Results['ObjectiveFunction'] != 0).any(axis=1)].round(digits)
self.Results['ObjectiveValue'] = self.Results['ObjectiveValue'].round(digits)
self.Results['Proteins'] = self.Results['Proteins'].round(digits)
self.Results['uniqueReactions'] = self.Results['uniqueReactions'].round(digits)
self.Results['Reactions'] = self.Results['Reactions'].round(digits)
self.Results['Enzymes'] = self.Results['Enzymes'].round(digits)
self.Results['Processes'] = self.Results['Processes'].round(digits)
self.Results['Constraints'] = self.Results['Constraints'].round(digits)
self.Results['ExchangeFluxes'] = self.Results['ExchangeFluxes'].round(digits)
self.SimulationData = RBA_SimulationData(StaticData=self.ModelStructure)
self.SimulationData.fromSimulationResults(Controller=self, session_name=session_name)
if hasattr(self, 'Parameters'):
self.Parameters['EnzymeEfficiencies_FW'] = self.Parameters['EnzymeEfficiencies_FW'].round(
digits)
self.Parameters['EnzymeEfficiencies_BW'] = self.Parameters['EnzymeEfficiencies_BW'].round(
digits)
self.Parameters['ProcessEfficiencies'] = self.Parameters['ProcessEfficiencies'].round(
digits)
self.Parameters['CompartmentCapacities'] = self.Parameters['CompartmentCapacities'].round(
digits)
self.Parameters['TargetValues'] = self.Parameters['TargetValues'].round(digits)
self.Parameters['Medium'] = self.Parameters['Medium'].loc[(
self.Parameters['Medium'] != 0).any(axis=1)].round(digits)
self.SimulationParameters = RBA_SimulationParameters(StaticData=self.ModelStructure)
self.SimulationParameters.fromSimulationResults(Controller=self)
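    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance):
    #   sim.writeResults(session_name='my_session', digits=5)
    #   data = sim.SimulationData            # rbatools.RBA_SimulationData object
    #   params = sim.SimulationParameters    # rbatools.RBA_SimulationParameters object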
def returnExchangeFluxes(self):
"""
        Returns a dictionary with the exchange-rates of boundary-metabolites.
Returns
-------
        Dictionary with exchange-keys and respective rates.
"""
out = {}
for j in self.ExchangeMap.keys():
netflux = 0
for k in self.ExchangeMap[j].keys():
netflux += self.ExchangeMap[j][k]*self.Problem.SolutionValues[k]
if netflux != 0:
out[j] = netflux
return(out)
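    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance):
    #   exchanges = sim.returnExchangeFluxes()
    #   for metabolite, rate in exchanges.items():
    #       print(metabolite, rate)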
def ConstraintSaturation(self, constraints=None):
"""
Determines the saturation of model constraints at current solution.
Parameters
----------
constraints : str or list of str
Specifies constraints(s) for which the saturation is to be determined.
Default-value = None:
All model-constraints are taken
Returns
-------
Pandas DataFrame with constraint-names as indices and the columns 'LHS', 'RHS', and 'Saturation'.
        'LHS': The sum over the respective constraint-row multiplied elementwise with the solution vector.
        'RHS': The value of the problem's right-hand side, corresponding to the respective constraint.
'Saturation': The saturation of the respective constraint ('LHS'/'RHS').
(Equality constraints are always saturated)
"""
if constraints is None:
ConstraintsInQuestion = self.Problem.LP.row_names
else:
if isinstance(constraints, list):
ConstraintsInQuestion = constraints
elif isinstance(constraints, str):
ConstraintsInQuestion = [constraints]
if len(list(constraints)) > 0:
if isinstance(constraints[0], list):
ConstraintsInQuestion = constraints[0]
if isinstance(constraints[0], str):
ConstraintsInQuestion = [constraints[0]]
if len(list(constraints)) == 0:
ConstraintsInQuestion = self.Problem.LP.row_names
rhs = self.Problem.getRighthandSideValue(ConstraintsInQuestion)
lhs = self.Problem.calculateLefthandSideValue(ConstraintsInQuestion)
RHS = list(rhs.values())
LHS = list(lhs.values())
Out = pandas.DataFrame(columns=['LHS', 'RHS', 'Saturation'], index=ConstraintsInQuestion)
for i in ConstraintsInQuestion:
lhval = LHS[self.Problem.LP.rowIndicesMap[i]]
rhval = RHS[self.Problem.LP.rowIndicesMap[i]]
sat = numpy.nan
if rhval != 0:
sat = lhval/rhval
Out.loc[i, 'LHS'] = lhval
Out.loc[i, 'RHS'] = rhval
Out.loc[i, 'Saturation'] = sat
self.LogBook.addEntry(
'Saturation of constraint {} determined to be {}.'.format(i, sat))
return(Out)
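    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance
    # and the constraint ID is a placeholder that may not exist in a given model):
    #   saturation_all = sim.ConstraintSaturation()
    #   saturation_one = sim.ConstraintSaturation(constraints='c_density')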
def setMedium(self, changes, loggingIntermediateSteps=False):
"""
Sets the concentration of specified growth-substrate(s) in medium.
Parameters
----------
changes : dict
Keys : ID of metabolite(s) in medium.
            Values : New concentration(s)
"""
for species in (changes.keys()):
self.Medium[species] = float(changes[species])
self.Problem.ClassicRBAmatrix.set_medium(self.Medium)
self.Problem.ClassicRBAmatrix.build_matrices(self.Mu)
inputMatrix = RBA_Matrix()
inputMatrix.loadMatrix(matrix=self.Problem.ClassicRBAmatrix)
self.Problem.LP.updateMatrix(matrix=inputMatrix, Ainds=MediumDependentCoefficients_A(
self), Binds=[], CTinds=[], LBinds=None, UBinds=None)
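    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance
    # and 'M_glc' is a placeholder medium-species ID):
    #   sim.setMedium(changes={'M_glc': 10.0})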
def setMu(self, Mu, loggingIntermediateSteps=False):
"""
Sets growth-rate to specified value.
Parameters
----------
Mu : float
Growth rate
"""
self.LogBook.addEntry('Growth-rate changed:{} --> {}'.format(self.Mu, float(Mu)))
self.Problem.setMu(Mu=float(Mu), ModelStructure=self.ModelStructure,
logging=loggingIntermediateSteps)
self.Mu = float(Mu)
def doSolve(self, runName='DontSave', loggingIntermediateSteps=False):
"""
Solves problem to find solution.
Does the same as rbatools.RBA_Problem.solveLP().
Just has some automatic option for results-recording.
Parameters
----------
runName : str
Name of observation.
Serves as ID for all data, originating from this run.
Special values :
'DontSave' : Results are not recorded
'Auto' : Results are automatically recorded
and appended to existing ones.
Named with number.
Any other string: Results are recorded under this name.
Default: 'DontSave'
"""
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
            if runName != 'DontSave':
                if runName == 'Auto':
if hasattr(self, 'Results'):
name = str(self.Results['Reactions'].shape[1]+1)
if not hasattr(self, 'Results'):
name = '1'
                if runName != 'Auto':
name = runName
self.recordResults(runName=name)
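    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance):
    #   sim.doSolve(runName='DontSave')   # solve without recording
    #   sim.doSolve(runName='Auto')       # solve and record under an automatically numbered name
    #   sim.doSolve(runName='glc_10')     # solve and record under a custom name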
def findMaxGrowthRate(self, precision=0.0005, max=4, start_value=None, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the maximal feasible growth-rate.
Parameters
----------
precision : float
            Numeric precision with which the maximum is approximated.
            Default : 0.0005
max : float
Defines the highest growth rate to be screened for.
Default=4
start_value : float
Defines a starting-value of the search for the maximum growth-rate.
A close starting-value reduces the required number of iterations, for the algorithm to converge.
If not provided search starts at growth-rate 0.
Default = None
recording : bool
Records intermediate feasible solutions
while approaching the maximum growth-rate.
Default : False
Returns
-------
maximum feasible growth rate as float.
"""
minMu = 0
maxMu = max
if start_value is None:
testMu = minMu
else:
testMu = start_value
iteration = 0
while (maxMu - minMu) > precision:
self.setMu(Mu=testMu)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
self.recordResults('DichotomyMu_iteration_'+str(iteration))
minMu = testMu
else:
maxMu = testMu
testMu = numpy.mean([maxMu, minMu])
self.LogBook.addEntry('Maximal growth-rate found to be: {}.'.format(minMu))
if minMu == max:
            print('Warning: Maximum growth rate might exceed the specified range. Try rerunning this method with a larger max-argument.')
self.setMu(Mu=minMu)
self.Problem.solveLP(logging=False)
self.Problem.SolutionType = 'GrowthRate_maximization'
return(minMu)
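    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance):
    #   mu_max = sim.findMaxGrowthRate(precision=0.0005, max=4, recording=True)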
def knockOut(self, gene, loggingIntermediateSteps=False):
"""
Simulates a gene knock out.
Constrains all variables in the LP-problem (enzymes, other machineries), which require this gene(s), to zero.
Parameters
----------
gene : str or list of strings
ID(s) of model-proteins to be knocked out.
Can either be gene-identifier, represented as ID or ProtoID of proteins in rbatools.protein_bloc.ProteinBlock.Elements class (depends on whether protein-isoforms are considered).
"""
if type(gene) is str:
genes = [gene]
if type(gene) is list:
genes = gene
isoform_genes = [g for g in genes if g in list(self.ModelStructure.ProteinInfo.Elements.keys(
))]+[i for g in genes for i in self.ModelStructure.ProteinInfo.Elements.keys() if self.ModelStructure.ProteinInfo.Elements[i]['ProtoID'] == g]
for g in isoform_genes:
self.LogBook.addEntry('Gene {} knocked out.'.format(g))
ConsumersEnzymes = self.ModelStructure.ProteinInfo.Elements[g]['associatedEnzymes']
for i in ConsumersEnzymes:
LikeliestVarName = difflib.get_close_matches(i, self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
ConsumersProcess = self.ModelStructure.ProteinInfo.Elements[g]['SupportsProcess']
for i in ConsumersProcess:
LikeliestVarName = difflib.get_close_matches(
str(self.ModelStructure.ProcessInfo.Elements[i]['ID']+'_machinery'), self.Problem.LP.col_names, 1)[0]
self.Problem.setLB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={LikeliestVarName: 0},
logging=loggingIntermediateSteps)
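    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance
    # and 'geneA' is a placeholder gene/protein ID):
    #   sim.knockOut(gene='geneA')
    #   mu_knockout = sim.findMaxGrowthRate()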
def FeasibleRange(self, variables=None, loggingIntermediateSteps=False):
"""
Determines the feasible range of model variables.
Parameters
----------
variables : str or list of str
Specifies variable(s) for which the feasible range is to be determined.
Default-value = None:
All model-variables are taken
Returns
-------
Dictionary with variable-names as keys and other dictionaries as values.
The 'inner' dictionaries hold keys 'Min' and 'Max'
with values representing lower and upper bound of feasible range respectively.
E.g. : {'variableA':{'Min':42 , 'Max':9000},
'variableB':{'Min':-9000 , 'Max':-42}}
"""
if variables is None:
VariablesInQuestion = self.Problem.LP.col_names
else:
if isinstance(variables, list):
VariablesInQuestion = variables
elif isinstance(variables, str):
VariablesInQuestion = [variables]
out = {}
for i in VariablesInQuestion:
min = numpy.nan
max = numpy.nan
self.Problem.clearObjective(logging=loggingIntermediateSteps)
self.Problem.setObjectiveCoefficients(
inputDict={i: 1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
min = self.Problem.SolutionValues[i]
self.Problem.setObjectiveCoefficients(
inputDict={i: -1.0}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = self.Problem.SolutionValues[i]
out.update({i: {'Min': min, 'Max': max}})
self.LogBook.addEntry(
'Feasible-range of {} determined to be between {} and {}.'.format(i, min, max))
return(out)
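    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance
    # and the variable IDs are placeholders):
    #   fr = sim.FeasibleRange(variables=['R_reactionA', 'R_reactionB'])
    #   lower, upper = fr['R_reactionA']['Min'], fr['R_reactionA']['Max']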
def ParetoFront(self, variable_X, variable_Y, N=10, sign_VY='max', loggingIntermediateSteps=False):
"""
Determine Pareto front of two model variables.
Parameters
----------
variable_X : str
ID of variable, representing the X-coordinate of the Pareto-front
variable_Y : str
ID of variable, representing the Y-coordinate of the Pareto-front
N : int
Number of intervals within the feasible range of variable_X.
Default-value=10.
sign_VY : str
'max': variable_Y is maximised
'min': variable_Y is minimised
Returns
-------
Pandas DataFrame with columns named after the two input variables
        and N+1 rows (one per grid point). Each row represents a point on the Pareto front.
Entries on each row are the X and Y coordinate on the Pareto front,
representing the values of the two variables.
"""
if variable_X not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
if variable_Y not in self.Problem.LP.col_names:
print('Chosen Element not among problem variables')
return
FR = self.FeasibleRange(variable_X)
cMin = FR[variable_X]['Min']
cMax = FR[variable_X]['Max']
concentrations = [float(cMin+(cMax-cMin)*i/N) for i in range(N+1)]
Out = pandas.DataFrame(columns=[variable_X, variable_Y])
oldLB = self.Problem.getLB(variable_X)
oldUB = self.Problem.getUB(variable_X)
iteration = -1
for conc in concentrations:
iteration += 1
self.Problem.setLB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict={variable_X: conc}, logging=loggingIntermediateSteps)
self.Problem.clearObjective(logging=loggingIntermediateSteps)
if sign_VY == 'max':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: -1}, logging=loggingIntermediateSteps)
if sign_VY == 'min':
self.Problem.setObjectiveCoefficients(
inputDict={variable_Y: 1}, logging=loggingIntermediateSteps)
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
max = abs(self.Problem.ObjectiveValue)
else:
max = numpy.nan
self.Problem.setLB(inputDict=oldLB, logging=loggingIntermediateSteps)
self.Problem.setUB(inputDict=oldUB, logging=loggingIntermediateSteps)
Out.loc[iteration, variable_X] = conc
Out.loc[iteration, variable_Y] = max
self.LogBook.addEntry(
'Pareto-front between {} and {} determined.'.format(variable_X, variable_Y))
return(Out)
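    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance
    # and the variable IDs are placeholders):
    #   front = sim.ParetoFront(variable_X='R_reactionA', variable_Y='R_reactionB', N=10, sign_VY='max')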
### !!! Docstring ###
def buildFBA(self, type='classic', objective='classic', maintenanceToBM=False):
"""
Derives and constructs FBA-problem from RBA-problem and stores it under attribute 'FBA'.
Parameters
----------
        type : str
            'classic' or 'parsi'. Determines which rows and columns of the RBA-problem are kept in the derived FBA-problem.
        objective : str
            'classic' or 'targets'. With 'targets', a biomass-reaction 'R_BIOMASS_targetsRBA' is derived from the RBA target-vector.
        maintenanceToBM : boolean
            If True (and objective is 'targets'), the maintenance-ATP demand is incorporated into the derived biomass-reaction.
"""
RBAproblem = self.Problem.LP
A = RBAproblem.A.toarray()
if type == 'classic':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith('R_') and not i.startswith('M_') and not i.endswith('_synthesis')]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]
+ [RBAproblem.col_names.index(i) for i in RBAproblem.col_names if 'enzyme' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('M_')]
elif type == 'parsi':
Cols2remove = list(set([RBAproblem.col_names.index(i) for i in RBAproblem.col_names if not i.startswith(
'R_') and not i.startswith('M_') and not i.endswith('_synthesis')]+[RBAproblem.col_names.index(i) for i in RBAproblem.col_names if '_duplicate_' in i]))
Rows2remove = [RBAproblem.row_names.index(
i) for i in RBAproblem.row_names if not i.startswith('R_') and not i.startswith('M_')]
if objective == 'classic':
if 'R_maintenance_atp' in RBAproblem.col_names:
Cols2remove.append(RBAproblem.col_names.index('R_maintenance_atp'))
Anew = numpy.delete(A, Cols2remove, axis=1)
col_namesNew = list(numpy.delete(RBAproblem.col_names, Cols2remove))
LBnew = numpy.delete(RBAproblem.LB, Cols2remove)
UBnew = numpy.delete(RBAproblem.UB, Cols2remove)
fNew = numpy.delete(RBAproblem.f, Cols2remove)
Anew2 = numpy.delete(Anew, Rows2remove, axis=0)
row_namesNew = list(numpy.delete(RBAproblem.row_names, Rows2remove))
row_signsNew = list(numpy.delete(RBAproblem.row_signs, Rows2remove))
bNew = numpy.delete(RBAproblem.b, Rows2remove)
trnaInds = [i for i in range(len(row_namesNew)) if row_namesNew[i].startswith(
'M_') and 'trna' in row_namesNew[i]]
# bNew[trnaInds] = 0
if objective == 'targets':
col_namesNew.append('R_BIOMASS_targetsRBA')
LBnew = numpy.append(LBnew, 0)
UBnew = numpy.append(UBnew, 10000)
fNew = numpy.append(fNew, 0)
BMrxnCol = numpy.ones((len(row_namesNew), 1))
BMrxnCol[:, 0] = bNew
if maintenanceToBM:
MaintenanceTarget = LBnew[col_namesNew.index('R_maintenance_atp')]
BMrxnCol[row_namesNew.index('M_atp_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h2o_c')] += MaintenanceTarget
BMrxnCol[row_namesNew.index('M_adp_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_pi_c')] -= MaintenanceTarget
BMrxnCol[row_namesNew.index('M_h_c')] -= MaintenanceTarget
LBnew[col_namesNew.index('R_maintenance_atp')] = 0
Anew2 = numpy.append(Anew2, -BMrxnCol, axis=1)
bNew = numpy.array([0]*Anew2.shape[0])
Matrix1 = RBA_Matrix()
Matrix1.A = scipy.sparse.coo_matrix(Anew2)
Matrix1.b = bNew
Matrix1.LB = LBnew
Matrix1.UB = UBnew
Matrix1.row_signs = row_signsNew
Matrix1.row_names = row_namesNew
Matrix1.col_names = col_namesNew
Matrix1.f = fNew
if type == 'classic':
Matrix1.b = numpy.array([0]*len(row_signsNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
elif type == 'parsi':
MetaboliteRows = {i: Matrix1.row_names.index(
i) for i in Matrix1.row_names if i.startswith('M_')}
EnzymeCols = {i: Matrix1.col_names.index(
i) for i in Matrix1.col_names if i.startswith('R_') and '_enzyme' in i}
Matrix2 = RBA_Matrix()
Matrix2.A = scipy.sparse.coo_matrix(numpy.zeros((len(MetaboliteRows), len(EnzymeCols))))
Matrix2.b = numpy.array(Matrix1.b[list(MetaboliteRows.values())])
Matrix2.LB = numpy.array(Matrix1.LB[list(EnzymeCols.values())])
Matrix2.UB = numpy.array(Matrix1.UB[list(EnzymeCols.values())])
Matrix2.f = numpy.array(Matrix1.f[list(EnzymeCols.values())])
Matrix2.row_signs = [Matrix1.row_signs[i] for i in list(MetaboliteRows.values())]
Matrix2.row_names = list(MetaboliteRows.keys())
Matrix2.col_names = list(EnzymeCols.keys())
Matrix2.mapIndices()
Matrix1.b = numpy.array([0]*len(bNew))
LP1 = RBA_LP()
LP1.loadMatrix(Matrix1)
LP1.updateMatrix(Matrix2)
self.FBA = RBA_FBA(LP1)
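    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance):
    #   sim.buildFBA(type='classic', objective='targets', maintenanceToBM=True)
    #   fba_problem = sim.FBA    # rbatools.RBA_FBA object derived from the RBA-problem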
def findMinMediumConcentration(self, metabolite, precision=0.00001, max=100, recording=False, loggingIntermediateSteps=False):
"""
Applies dichotomy-search to find the minimal feasible concentration of
growth-substrate in medium, at a previously set growth-rate.
Parameters
----------
metabolite : str
ID of metabolite in medium.
precision : float
            Numeric precision with which the minimum is approximated.
Default : 0.00001
max : float
            Defines the highest concentration to be screened for.
Default=100
recording : bool
Records intermediate feasible solutions
while approaching the minimum concentration.
Default : False
Returns
-------
minimum feasible growth-substrate concentration as float.
"""
minConc = 0.0
maxConc = max
testConc = minConc
iteration = 0
oldConc = self.Medium[metabolite]
while (maxConc - minConc) > precision:
self.setMedium(changes={metabolite: testConc})
self.Problem.solveLP(logging=loggingIntermediateSteps)
if self.Problem.Solved:
iteration += 1
if recording:
run_name = 'Dichotomy_'+metabolite+'_' + \
str(testConc)+'_iteration_'+str(iteration)
self.recordResults(run_name)
maxConc = testConc
else:
minConc = testConc
testConc = numpy.mean([maxConc, minConc])
self.LogBook.addEntry(
'Minimal required {} concentration found to be: {}.'.format(metabolite, maxConc))
self.setMedium(changes={metabolite: oldConc})
return(maxConc)
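    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance
    # and 'M_o2' is a placeholder medium-species ID):
    #   sim.setMu(0.2)
    #   min_conc = sim.findMinMediumConcentration(metabolite='M_o2', precision=0.00001, max=100)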
def addProtein(self, input):
"""
Adds representation of individual proteins to problem.
Parameters
----------
input : dict or str
If input is str it has to be the ID of a protein in the model.
            Then this protein is added to the problem and creates:
One constraint named Protein_'ID' (equality).
One variable named TotalLevel_'ID' representing the total amount.
One variable named Free_'ID'_'respectiveCompartment', this
represents the fraction of the protein not assuming any function.
It however consumes resources for synthesis (precursors and processes),
which are the same as defined in the model files.
            And takes up space in the compartment as specified in the model-files
for the protein.
If input is dict it has to have two keys; 'ID' and 'UnusedProteinFraction'.
            By specifying this input one can define that the unused fraction of the protein
can also reside in other compartments and which processes it requires.
The value to 'ID' is the ID of a protein in the model.
The value to 'UnusedProteinFraction' is another dictionary.
This can have several keys which must be model-compartments.
For each of the keys the value is a dict holding IDs of model-processes as Keys
and process requirements as Values (numerical).
This specifies which processes each of the compartment-species of the protein
requires.
This generates the same constraint and TotalLevel-variable as with the simple input,
however a variable representing each of the compartment-species for the unused fraction
is added and incorporates the specific process requirements.
            E.g: input = {'ID': 'proteinA',
                          'UnusedProteinFraction': {'Cytoplasm': {'Translation': 100, 'Folding': 10},
                                                    'Membrane': {'Translation': 100, 'Folding': 20, 'Secretion': 100}
                                                    }
                          }
This adds 'proteinA' to the model, where the unused fraction can reside either in
            the Cytoplasm or the Membrane. However, while the cytosolic species only requires the
            processes 'Translation' and 'Folding', the membrane-bound species also requires 'Secretion'
and occupies more folding capacity.
Then the constraint 'Protein_proteinA' is added and the 3 variables
'TotalLevel_proteinA', 'Free_proteinA_Cytoplasm' and 'Free_proteinA_Membrane'.
"""
if type(input) is str:
input = {'ID': input}
if 'ID' not in list(input.keys()):
print('Error, no protein ID provided')
return
if input['ID'] not in list(self.ModelStructure.ProteinInfo.Elements.keys()):
print('Error, protein not in model')
return
if 'UnusedProteinFraction' not in list(input.keys()):
input.update({'UnusedProteinFraction':
{self.ModelStructure.ProteinInfo.Elements[input['ID']]['Compartment']:
self.ModelStructure.ProteinInfo.Elements[input['ID']]['ProcessRequirements']}})
self.LogBook.addEntry('Protein {} added with specifications {}.'.format(
input['ID'], str(json.dumps(input))))
Muindexlist = []
## Building RBA_Matrix-object for new constraint-row, representing protein ##
UsedProtein = RBA_Matrix()
UsedProtein.A = scipy.sparse.coo_matrix(
buildUsedProteinConstraint(Controler=self, protein=input['ID']))
UsedProtein.b = numpy.array([float(0)])
UsedProtein.f = numpy.array(self.Problem.LP.f)
UsedProtein.LB = numpy.array(self.Problem.LP.LB)
UsedProtein.UB = numpy.array(self.Problem.LP.UB)
UsedProtein.row_signs = ['E']
UsedProtein.row_names = ['Protein_'+input['ID']]
UsedProtein.col_names = self.Problem.LP.col_names
## Add used protein row to problem ##
self.Problem.LP.addMatrix(matrix=UsedProtein)
## Add used protein row to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UsedProtein)
## Building RBA_Matrix-object for new variable-col, representing total level of protein ##
TotProtein = RBA_Matrix()
TotProtein.A = scipy.sparse.coo_matrix(numpy.array(numpy.matrix(
numpy.array([float(0)]*self.Problem.LP.A.shape[0]+[float(-1)])).transpose()))
TotProtein.f = numpy.array([float(0)])
TotProtein.LB = numpy.array([float(0)])
TotProtein.UB = numpy.array([float(100000.0)])
TotProtein.b = numpy.array(list(self.Problem.LP.b)+list(UsedProtein.b))
TotProtein.row_signs = self.Problem.LP.row_signs+UsedProtein.row_signs
TotProtein.row_names = self.Problem.LP.row_names+UsedProtein.row_names
TotProtein.col_names = ['TotalLevel_'+input['ID']]
## Add total protein col to problem ##
self.Problem.LP.addMatrix(matrix=TotProtein)
## Add total protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=TotProtein)
## Building RBA_Matrix-object for new variable-col,##
## representing each compartment-species of the protein ##
for comp_species in list(input['UnusedProteinFraction'].keys()):
## Initiate RBA_Matrix object##
UnusedProtein = RBA_Matrix()
UnusedProtein.col_names = ['Free_'+input['ID']+'_'+comp_species]
## Extract required processes for protein and the respective demand ##
ProcIDs = list(input['UnusedProteinFraction'][comp_species].keys())
Preq = list(input['UnusedProteinFraction'][comp_species].values())
ProcessCost = dict(
zip([self.ModelStructure.ProcessInfo.Elements[k]['ID'] for k in ProcIDs], Preq))
## Get required charged trna buildingblocks and their stoichiometry in protein ##
composition = self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAcomposition']
## Extract the composition of charged trnas in terms of metabolic species ##
species = self.ModelStructure.ProcessInfo.Elements['Translation']['Components']
## Determine required metabolites and their stoichiometry in protein ##
MetaboliteCost = buildCompositionofUnusedProtein(
species=species, composition=composition)
            ## Assemble process and metabolite requirements into stoichiometric column vector ##
## And add to RBA_Matrix object ##
colToAdd = numpy.array(numpy.matrix(numpy.array(list(MetaboliteCost.values())+list(ProcessCost.values()) +
[float(1)]+[self.ModelStructure.ProteinInfo.Elements[input['ID']]['AAnumber']])).transpose())
UnusedProtein.A = scipy.sparse.coo_matrix(colToAdd)
## Add other information to RBA_Matrix object ##
UnusedProtein.row_names = list(MetaboliteCost.keys())+[str(pc+'_capacity') for pc in list(
ProcessCost.keys())]+['Protein_'+input['ID']]+[str(comp_species + '_density')]
UnusedProtein.b = numpy.zeros(len(UnusedProtein.row_names))
UnusedProtein.row_signs = ['E']*len(UnusedProtein.row_names)
UnusedProtein.LB = numpy.array([float(0)])
UnusedProtein.UB = numpy.array([float(100000.0)])
UnusedProtein.f = numpy.array([float(0)])
self.ProteinDilutionIndices = list(
zip(list(MetaboliteCost.keys()), UnusedProtein.col_names*len(list(MetaboliteCost.keys()))))
## Add free protein col to problem ##
self.Problem.LP.addMatrix(matrix=UnusedProtein)
## Add free protein col to reference Matrix (Mu == 1) ##
self.Problem.MuOneMatrix.addMatrix(matrix=UnusedProtein)
## Find coefficients of unused protein column, subject to dilution (Metabolite and Process cost) ##
## And add them to MuDepIndices_A ##
nonZeroEntries = numpy.where(UnusedProtein.A != 0)[0]
self.Problem.MuDepIndices_A += [(UnusedProtein.row_names[i], UnusedProtein.col_names[0]) for i in nonZeroEntries if UnusedProtein.row_names[i]
!= 'Protein_'+input['ID'] and UnusedProtein.row_names[i] not in self.Problem.CompartmentDensities]
self.setMu(self.Problem.Mu)
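    # Illustrative usage sketch (not part of the original code; 'sim' is a hypothetical instance,
    # 'proteinA' and the compartment/process names are placeholders):
    #   sim.addProtein({'ID': 'proteinA',
    #                   'UnusedProteinFraction': {'Cytoplasm': {'Translation': 100, 'Folding': 10}}})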
## !!! ##
def eukaryoticDensities(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
Signs = ['L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L']
totalAA = 3.1*0.71
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*len(Compartments)+['E']
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
AlipidsA = numpy.zeros((7, len(Compartments)))
Alipids = RBA_Matrix()
Alipids.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs += ['E', 'E', 'E', 'E', 'E', 'E', 'E']
Alipids.b = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Alipids.LB = numpy.array([float(0)]*len(Compartments))
Alipids.UB = numpy.array([float(1)]*len(Compartments))
Alipids.f = numpy.array([float(0)]*len(Compartments))
AlipidsA[Alipids.row_names.index('M_pc_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000883*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00005852*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mIM')] = -0.00003377*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.00000873*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'),
Alipids.col_names.index('F_mIM')] = -0.00002*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mIM')] = -0.0000039*totalAA
AlipidsA[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = -0.008547*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'),
('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM')]
AlipidsA[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = -0.000636*totalAA
AlipidsA[Alipids.row_names.index('M_pe_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0004822*totalAA
AlipidsA[Alipids.row_names.index('M_ptd1ino_SC_c'),
Alipids.col_names.index('F_mOM')] = -0.0001289*totalAA
AlipidsA[Alipids.row_names.index('M_ps_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000167*totalAA
AlipidsA[Alipids.row_names.index('M_clpn_SC_m'), Alipids.col_names.index(
'F_mOM')] = -0.00004467*totalAA
AlipidsA[Alipids.row_names.index('M_pa_SC_c'), Alipids.col_names.index(
'F_mOM')] = -0.0000696*totalAA
self.Problem.MuDepIndices_A += [('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c',
'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')]
Alipids.A = scipy.sparse.coo_matrix(AlipidsA)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), ('M_pa_SC_c', 'F_mIM'), (
'M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=[('M_pc_SC_c', 'F_mIM'), ('M_pe_SC_c', 'F_mIM'), ('M_ptd1ino_SC_c', 'F_mIM'), ('M_ps_SC_c', 'F_mIM'), ('M_clpn_SC_m', 'F_mIM'), (
'M_pa_SC_c', 'F_mIM'), ('M_ergst_c', 'F_mIM'), ('M_pc_SC_c', 'F_mOM'), ('M_pe_SC_c', 'F_mOM'), ('M_ptd1ino_SC_c', 'F_mOM'), ('M_ps_SC_c', 'F_mOM'), ('M_clpn_SC_m', 'F_mOM'), ('M_pa_SC_c', 'F_mOM')])
## !!! ##
def eukaryoticDensities2(self, totalAA=3.1, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.69
m_mIM = 1.11
m_mIMS = 0.7
m_mOM = 7.2
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
CompartmentMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(1)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments)+1)
# CompartmentMatrix.row_signs = ['E']*(len(Compartments)+1)
CompartmentMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalCapacity']
CompartmentMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
CompartmentMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
CompartmentMatrix.row_signs += ['E', 'E', 'E']
CompartmentMatrix.b = numpy.array(list(CompartmentMatrix.b)+[float(0)]*3)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_m')] = float(1)
Anew[CompartmentMatrix.row_names.index(
'm_mIM'), CompartmentMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[CompartmentMatrix.row_names.index(
'm_mIMS'), CompartmentMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[CompartmentMatrix.row_names.index(
'm_mOM'), CompartmentMatrix.col_names.index('F_mOM')] = -m_mOM
CompartmentMatrix.A = scipy.sparse.coo_matrix(Anew)
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
ConstraintMatrix = numpy.zeros((7, 0))
Alipids = RBA_Matrix()
Alipids.col_names = []
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([])
Alipids.UB = numpy.array([])
Alipids.f = numpy.array([])
MudepIndices = []
for pc in self.ModelStructure.ProcessInfo.Elements.keys():
if self.ModelStructure.ProcessInfo.Elements[pc]['ID'] not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])
# Alipids.LB = numpy.array(list(Alipids.LB).append(list(self.Problem.LP.LB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.UB = numpy.array(list(Alipids.UB).append(list(self.Problem.LP.UB)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
# Alipids.f = numpy.array(list(Alipids.f).append(list(self.Problem.LP.f)[
# self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])]))
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
list(self.Problem.LP.f)[self.Problem.LP.col_names.index(self.ModelStructure.ProcessInfo.Elements[pc]['ID'])])])
for p in self.ModelStructure.ProcessInfo.Elements[pc]['Composition'].keys():
lE = sum(list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values(
)))*self.ModelStructure.ProcessInfo.Elements[pc]['Composition'][p]
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pe_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ptd1ino_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ps_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_clpn_SC_m', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_pa_SC_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
                        MudepIndices.append(('M_ergst_c', self.ModelStructure.ProcessInfo.Elements[pc]['ID']))
ConstraintMatrix = ConstraintMatrixNew
for e in self.ModelStructure.EnzymeInfo.Elements.keys():
if e not in self.Problem.LP.col_names:
continue
ConstraintMatrixNew = numpy.zeros(
(ConstraintMatrix.shape[0], ConstraintMatrix.shape[1]+1))
ConstraintMatrixNew[:, 0:ConstraintMatrix.shape[1]] = ConstraintMatrix
Alipids.col_names.append(e)
# xnew = list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)]
Alipids.LB = numpy.concatenate([Alipids.LB, numpy.array(
list(self.Problem.LP.LB)[self.Problem.LP.col_names.index(e)])])
Alipids.UB = numpy.concatenate([Alipids.UB, numpy.array(
list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)])])
Alipids.f = numpy.concatenate([Alipids.f, numpy.array(
list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)])])
# Alipids.LB = numpy.array(list(Alipids.LB).append(xnew))
# Alipids.UB = numpy.array(list(Alipids.UB).append(
# list(self.Problem.LP.UB)[self.Problem.LP.col_names.index(e)]))
# Alipids.f = numpy.array(list(Alipids.f).append(
# list(self.Problem.LP.f)[self.Problem.LP.col_names.index(e)]))
for p in self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'].keys():
lE = sum(
list(self.ModelStructure.ProteinInfo.Elements[p]['AAcomposition'].values()))
lE *= self.ModelStructure.EnzymeInfo.Elements[e]['Subunits'][p]['StochFac']
if self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mOM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mOM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mOM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', e))
                        MudepIndices.append(('M_pe_SC_c', e))
                        MudepIndices.append(('M_ptd1ino_SC_c', e))
                        MudepIndices.append(('M_ps_SC_c', e))
                        MudepIndices.append(('M_clpn_SC_m', e))
                        MudepIndices.append(('M_pa_SC_c', e))
elif self.ModelStructure.ProteinInfo.Elements[p]['Compartment'] == 'mIM':
ConstraintMatrixNew[Alipids.col_names.index(
'M_pc_SC_c'), ConstraintMatrix.shape[1]] -= PC_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pe_SC_c'), ConstraintMatrix.shape[1]] -= PE_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ptd1ino_SC_c'), ConstraintMatrix.shape[1]] -= PI_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ps_SC_c'), ConstraintMatrix.shape[1]] -= PS_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_clpn_SC_m'), ConstraintMatrix.shape[1]] -= CL_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_pa_SC_c'), ConstraintMatrix.shape[1]] -= PA_mIM*lE/totalAA
ConstraintMatrixNew[Alipids.col_names.index(
'M_ergst_c'), ConstraintMatrix.shape[1]] -= ES_mIM*lE/totalAA
                        MudepIndices.append(('M_pc_SC_c', e))
                        MudepIndices.append(('M_pe_SC_c', e))
                        MudepIndices.append(('M_ptd1ino_SC_c', e))
                        MudepIndices.append(('M_ps_SC_c', e))
                        MudepIndices.append(('M_clpn_SC_m', e))
                        MudepIndices.append(('M_pa_SC_c', e))
                        MudepIndices.append(('M_ergst_c', e))
ConstraintMatrix = ConstraintMatrixNew
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(ConstraintMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
            self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities3(self, totalAA=3.1, VolumeFraction=False, CompartmentRelationships=True, CompartmentComponents=False):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
m_mIM = 0.66
m_mIMS = 2
m_mOM = 8
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
# A[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.LP.A = scipy.sparse.coo_matrix(A)
A0 = self.Problem.MuOneMatrix.A.toarray()
# A0[numpy.min(DensityIndices):numpy.max(DensityIndices)+1, :] /= totalAA
# self.Problem.MuOneMatrix.A = scipy.sparse.coo_matrix(A0)
OccupationMatrix = RBA_Matrix()
# A = numpy.ones((len(Compartments)+1, len(Compartments)))
A = -numpy.eye(len(Compartments))
# Eye = -numpy.eye(len(Compartments))
# A[0:len(Compartments), :] = Eye
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
# OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1)+[float(totalAA)])
OccupationMatrix.b = numpy.array([-0.209*totalAA]+[float(0)]*(len(Compartments)-1))
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
# OccupationMatrix.row_signs = ['E']*(len(Compartments))+['L']
OccupationMatrix.row_signs = ['E']*(len(Compartments))
# OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density', 'm_density',
# 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'TotalProtein']
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
# CompartmentMatrix.row_signs[CompartmentMatrix.col_names.index('F_m')]='E'
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
CompartmentMatrix = RBA_Matrix()
if VolumeFraction:
A = numpy.eye(len(Compartments))*5/float(totalAA)
else:
A = numpy.eye(len(Compartments))/float(totalAA)
CompartmentMatrix.A = scipy.sparse.coo_matrix(A)
CompartmentMatrix.b = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.f = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.LB = numpy.array([float(0)]*len(Compartments))
CompartmentMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
CompartmentMatrix.row_signs = ['L']*(len(Compartments))
# CompartmentMatrix.row_signs = ['E']*(len(Compartments))
CompartmentMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume',
'm_volume', 'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume']
CompartmentMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
CompartmentMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=CompartmentMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=CompartmentMatrix)
VolumeMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
# A[len(Compartments), [1, 5, 6, 8, 9]] = 0
# A[len(Compartments), 8] = 0
VolumeMatrix.A = scipy.sparse.coo_matrix(A)
VolumeMatrix.b = numpy.array([float(0)]*len(Compartments)+[float(1)])
VolumeMatrix.f = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.LB = numpy.array([float(0)]*len(Compartments))
VolumeMatrix.UB = numpy.array([float(1)]*len(Compartments))
VolumeMatrix.row_signs = ['L']*(len(Compartments))+['E']
# VolumeMatrix.row_signs = ['E']*(len(Compartments))+['E']
VolumeMatrix.row_names = ['n_volume', 'mIM_volume', 'vM_volume', 'mIMS_volume', 'm_volume',
'erM_volume', 'mOM_volume', 'x_volume', 'cM_volume', 'gM_volume', 'c_volume', 'TotalVolume']
VolumeMatrix.col_names = ['F_n', 'F_mIM', 'F_vM', 'F_mIMS',
'F_m', 'F_erM', 'F_mOM', 'F_x', 'F_cM', 'F_gM', 'F_c']
if not CompartmentRelationships:
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentRelationships:
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
VolumeMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
VolumeMatrix.row_signs += ['E', 'E', 'E']
VolumeMatrix.b = numpy.array(list(VolumeMatrix.b)+[float(0)]*3)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_m')] = float(1)
Anew[VolumeMatrix.row_names.index(
'm_mIM'), VolumeMatrix.col_names.index('F_mIM')] = -m_mIM
Anew[VolumeMatrix.row_names.index(
'm_mIMS'), VolumeMatrix.col_names.index('F_mIMS')] = -m_mIMS
Anew[VolumeMatrix.row_names.index(
'm_mOM'), VolumeMatrix.col_names.index('F_mOM')] = -m_mOM
VolumeMatrix.A = scipy.sparse.coo_matrix(Anew)
VolumeMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=VolumeMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=VolumeMatrix)
if CompartmentComponents:
PC_mIM = 0.0000883
PE_mIM = 0.00005852
PI_mIM = 0.00003377
PS_mIM = 0.00000873
CL_mIM = 0.00002
PA_mIM = 0.0000039
ES_mIM = 0.008547
PC_mOM = 0.000636
PE_mOM = 0.0004822
PI_mOM = 0.0001289
PS_mOM = 0.0000167
CL_mOM = 0.00004467
PA_mOM = 0.0000696
ES_mOM = 0.0
PC_vM = 0.0003635
PE_vM = 0.4156
PI_vM = 0.0001297
PS_vM = 0.00003435
CL_vM = 0.0000068
PA_vM = 0.0000186
ES_vM = 0.0142
PC_n = 0.000055
PE_n = 0.000035
PI_n = 0.000017
PS_n = 0.0000072
CL_n = 0.0
PA_n = 0.0000031
ES_n = 0.0086
PC_gM = 0.00043
PE_gM = 0.00044
PI_gM = 0.00041
PS_gM = 0.0
CL_gM = 0.00022
PA_gM = 0.0
ES_gM = 0.0
PC_n = 0.0
PE_n = 0.0
PI_n = 0.0
PS_n = 0.0
CL_n = 0.0
PA_n = 0.0
ES_n = 0.0
PC_gM = 0.0
PE_gM = 0.0
PI_gM = 0.0
PS_gM = 0.0
CL_gM = 0.0
PA_gM = 0.0
ES_gM = 0.0
PC_vM = 0.0
PE_vM = 0.0
PI_vM = 0.0
PS_vM = 0.0
CL_vM = 0.0
PA_vM = 0.0
ES_vM = 0.0
PC_mIM = 0.0
PE_mIM = 0.0
PI_mIM = 0.0
PS_mIM = 0.0
CL_mIM = 0.0
PA_mIM = 0.0
ES_mIM = 0.0
PC_mOM = 0.0
PE_mOM = 0.0
PI_mOM = 0.0
PS_mOM = 0.0
CL_mOM = 0.0
PA_mOM = 0.0
ES_mOM = 0.0
Alipids = RBA_Matrix()
Alipids.col_names = ['F_mIM', 'F_mOM', 'F_vM', 'F_n', 'F_gM']
Alipids.row_names = ['M_pc_SC_c', 'M_pe_SC_c', 'M_ptd1ino_SC_c',
'M_ps_SC_c', 'M_clpn_SC_m', 'M_pa_SC_c', 'M_ergst_c']
Alipids.row_signs = [
self.Problem.LP.row_signs[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names]
Alipids.b = numpy.array(
[self.Problem.LP.b[self.Problem.LP.row_names.index(i)] for i in Alipids.row_names])
Alipids.LB = numpy.array([0, 0, 0, 0, 0])
Alipids.UB = numpy.array([1, 1, 1, 1, 1])
Alipids.f = numpy.array([0, 0, 0, 0, 0])
LipidMatrix = numpy.zeros((7, 5))
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mIM')] = PC_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mIM')] = PE_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mIM')] = PI_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mIM')] = PS_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mIM')] = CL_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mIM')] = PA_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mIM')] = ES_mIM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_mOM')] = PC_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_mOM')] = PE_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_mOM')] = PI_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_mOM')] = PS_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_mOM')] = CL_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_mOM')] = PA_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_mOM')] = ES_mOM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_vM')] = PC_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_vM')] = PE_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_vM')] = PI_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_vM')] = PS_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_vM')] = CL_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_vM')] = PA_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_vM')] = ES_vM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_n')] = PC_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_n')] = PE_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_n')] = PI_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_n')] = PS_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_n')] = CL_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_n')] = PA_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_n')] = ES_n/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pc_SC_c'), Alipids.col_names.index('F_gM')] = PC_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pe_SC_c'), Alipids.col_names.index('F_gM')] = PE_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ptd1ino_SC_c'), Alipids.col_names.index('F_gM')] = PI_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ps_SC_c'), Alipids.col_names.index('F_gM')] = PS_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_clpn_SC_m'), Alipids.col_names.index('F_gM')] = CL_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_pa_SC_c'), Alipids.col_names.index('F_gM')] = PA_gM/totalAA
LipidMatrix[Alipids.row_names.index(
'M_ergst_c'), Alipids.col_names.index('F_gM')] = ES_gM/totalAA
MudepIndices = [('M_pc_SC_c', i) for i in Alipids.col_names]+[('M_pe_SC_c', i) for i in Alipids.col_names]+[('M_ptd1ino_SC_c', i) for i in Alipids.col_names]+[('M_ps_SC_c', i)
for i in Alipids.col_names]+[('M_clpn_SC_m', i) for i in Alipids.col_names]+[('M_pa_SC_c', i) for i in Alipids.col_names]+[('M_ergst_c', i) for i in Alipids.col_names]
self.Problem.MuDepIndices_A += MudepIndices
Alipids.A = scipy.sparse.coo_matrix(LipidMatrix)
Alipids.mapIndices()
self.Problem.LP.updateMatrix(Alipids, Ainds=MudepIndices)
self.Problem.MuOneMatrix.updateMatrix(Alipids, Ainds=MudepIndices)
## !!! ##
def eukaryoticDensities4(self, CompartmentRelationships=True):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA = 3.1*0.91
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
m_mIM = 0.5
m_mIMS = 1
m_mOM = 5
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': 'AAres_PG_nucleus_DNA'})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'O_total': {'Equation': 'amino_acid_concentration_total - AAres_PG_secreted_Euk', 'Variables': ['amino_acid_concentration_total', 'AAres_PG_secreted_Euk']}})
self.Problem.MuDependencies['FromMatrix']['b'].remove('n_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('vM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mIMS_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('m_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('erM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('mOM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('x_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('cM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('gM_density')
self.Problem.MuDependencies['FromMatrix']['b'].remove('c_density')
## !!! ##
def eukaryoticDensities_calibration(self, CompartmentRelationships=False, mitoProportions={}, amino_acid_concentration_total='amino_acid_concentration_total'):
Compartments = ['n', 'mIM', 'vM', 'mIMS', 'm', 'erM', 'mOM', 'x', 'c', 'cM', 'gM']
totalAA_parameter = amino_acid_concentration_total
totalAA = 3.1
DensityIndices = [self.Problem.LP.row_names.index(
i) for i in self.Problem.CompartmentDensities]
A = self.Problem.LP.A.toarray()
A0 = self.Problem.MuOneMatrix.A.toarray()
OccupationMatrix = RBA_Matrix()
A = numpy.ones((len(Compartments)+1, len(Compartments)))
Eye = -numpy.eye(len(Compartments))
A[0:len(Compartments), :] = Eye
OccupationMatrix.b = numpy.array(list([float(0)]*len(Compartments))+[totalAA])
OccupationMatrix.f = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.LB = numpy.array([float(0)]*len(Compartments))
OccupationMatrix.UB = numpy.array([float(totalAA)]*len(Compartments))
OccupationMatrix.row_signs = ['E']*(len(Compartments)+1)
OccupationMatrix.row_names = ['n_density', 'mIM_density', 'vM_density', 'mIMS_density',
'm_density', 'erM_density', 'mOM_density', 'x_density', 'cM_density', 'gM_density', 'c_density', 'O_total']
OccupationMatrix.col_names = ['O_n', 'O_mIM', 'O_vM', 'O_mIMS',
'O_m', 'O_erM', 'O_mOM', 'O_x', 'O_cM', 'O_gM', 'O_c']
if CompartmentRelationships:
if len(list(mitoProportions.keys())) == 3:
m_mIM = mitoProportions['m_mIM']
m_mIMS = mitoProportions['m_mIMS']
m_mOM = mitoProportions['m_mOM']
Anew = numpy.zeros((A.shape[0]+3, A.shape[1]))
Anew[0:A.shape[0], :] = A
OccupationMatrix.row_names += ['m_mIM', 'm_mIMS', 'm_mOM']
OccupationMatrix.row_signs += ['E', 'E', 'E']
OccupationMatrix.b = numpy.array(list(OccupationMatrix.b)+[float(0)]*3)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_m')] = float(1)
Anew[OccupationMatrix.row_names.index(
'm_mIM'), OccupationMatrix.col_names.index('O_mIM')] = -m_mIM
Anew[OccupationMatrix.row_names.index(
'm_mIMS'), OccupationMatrix.col_names.index('O_mIMS')] = -m_mIMS
Anew[OccupationMatrix.row_names.index(
'm_mOM'), OccupationMatrix.col_names.index('O_mOM')] = -m_mOM
OccupationMatrix.A = scipy.sparse.coo_matrix(Anew)
else:
OccupationMatrix.A = scipy.sparse.coo_matrix(A)
OccupationMatrix.mapIndices()
self.Problem.LP.addMatrix(matrix=OccupationMatrix)
self.Problem.MuOneMatrix.addMatrix(matrix=OccupationMatrix)
# {'Index':{'Param1':'+','Param2':'+','Param2':'-'}}
#Type: 'Sum'#
# {'Index':'Param1'}
self.Problem.MuDependencies['FromParameters']['b'].update(
{'n_density': {'Equation': '-nonenzymatic_proteins_n/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_n', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIM_density': {
'Equation': '-nonenzymatic_proteins_mIM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'vM_density': {
'Equation': '-nonenzymatic_proteins_vM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_vM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mIMS_density': {
'Equation': '-nonenzymatic_proteins_mIMS/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mIMS', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'m_density': {'Equation': '-nonenzymatic_proteins_m/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_m', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'erM_density': {
'Equation': '-nonenzymatic_proteins_erM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_erM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'mOM_density': {
'Equation': '-nonenzymatic_proteins_mOM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_mOM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'x_density': {'Equation': '-nonenzymatic_proteins_x/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_x', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'cM_density': {
'Equation': '-nonenzymatic_proteins_cM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_cM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'gM_density': {
'Equation': '-nonenzymatic_proteins_gM/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_gM', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update(
{'c_density': {'Equation': '-nonenzymatic_proteins_c/inverse_average_protein_length', 'Variables': ['nonenzymatic_proteins_c', 'inverse_average_protein_length']}})
self.Problem.MuDependencies['FromParameters']['b'].update({'O_total': {'Equation': '{} - nonenzymatic_proteins_Secreted/inverse_average_protein_length'.format(totalAA_parameter), 'Variables': [
totalAA_parameter, 'nonenzymatic_proteins_Secreted', 'inverse_average_protein_length']}})
# !!! deal with hardcoded parameter_names... !!!
def estimate_specific_Kapps(self, proteomicsData, flux_bounds, mu, biomass_function=None, target_biomass_function=True, parsimonious_fba=True):
"""
Parameters
----------
proteomicsData : pandas.DataFrame (in mmol/gDW)
flux_bounds : pandas.DataFrame (in mmol/(gDW*h))
mu : float (in 1/h)
biomass_function : str
target_biomass_function : bool
        parsimonious_fba : bool
"""
from scipy.stats.mstats import gmean
old_model = copy.deepcopy(self.model)
for i in self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements:
if i.species == 'average_protein_c':
new_agg = rba.xml.parameters.Aggregate(id_='total_protein', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='amino_acid_concentration_total'))
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='inverse_average_protein_length'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_protein'
else:
self.model.targets.target_groups._elements_by_id['translation_targets'].concentrations._elements.remove(
i)
for i in self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements:
if i.species == 'mrna':
new_agg = rba.xml.parameters.Aggregate(id_='total_rna', type_='multiplication')
new_agg.function_references.append(rba.xml.parameters.FunctionReference(
function='RNA_massfraction_CarbonLimitation'))
new_agg.function_references.append(
rba.xml.parameters.FunctionReference(function='RNA_inversemillimolarweight'))
self.model.parameters.aggregates._elements.append(new_agg)
i.value = 'total_rna'
else:
self.model.targets.target_groups._elements_by_id['transcription_targets'].concentrations._elements.remove(
i)
self.rebuild_from_model()
self.setMedium(self.Medium)
self.addExchangeReactions()
self.setMu(mu)
if target_biomass_function:
self.buildFBA(objective='targets', maintenanceToBM=True)
BMfunction = 'R_BIOMASS_targetsRBA'
else:
self.buildFBA(objective='classic', maintenanceToBM=False)
BMfunction = biomass_function
for j in [i for i in self.Medium.keys() if self.Medium[i] == 0]:
Exrxn = 'R_EX_'+j.split('M_')[-1]+'_e'
self.FBA.setUB({Exrxn: 0})
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.clearObjective()
self.FBA.setObjectiveCoefficients({BMfunction: -1})
self.FBA.solveLP()
BMfluxOld = self.FBA.SolutionValues[BMfunction]
if parsimonious_fba:
self.FBA.parsimonise()
self.FBA.setLB(rxn_LBs)
self.FBA.setUB(rxn_UBs)
self.FBA.setLB({BMfunction: BMfluxOld})
self.FBA.setUB({BMfunction: BMfluxOld})
self.FBA.solveLP()
FluxDistribution = pandas.DataFrame(index=list(
self.FBA.SolutionValues.keys()), columns=['FluxValues'])
FluxDistribution['FluxValues'] = list(self.FBA.SolutionValues.values())
BMfluxNew = self.FBA.SolutionValues[BMfunction]
ProtoIDmap = {}
for i in self.ModelStructure.ProteinInfo.Elements.keys():
ProtoID = self.ModelStructure.ProteinInfo.Elements[i]['ProtoID']
if ProtoID in list(proteomicsData['ID']):
if not pandas.isna(proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]):
if proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0] != 0:
if ProtoID in ProtoIDmap.keys():
ProtoIDmap[ProtoID]['ModelProteins'].append(i)
else:
ProtoIDmap.update(
{ProtoID: {'ModelProteins': [i], 'CopyNumber': proteomicsData.loc[proteomicsData['ID'] == ProtoID, 'copy_number'].values[0]}})
ReactionMap = {}
for i in self.ModelStructure.ReactionInfo.Elements.keys():
if '_duplicate_' in i:
continue
else:
if i in list(FluxDistribution.index):
if FluxDistribution.loc[i, 'FluxValues'] != 0:
ReactionMap.update({i: {'ModelReactions': list(
[i]+self.ModelStructure.ReactionInfo.Elements[i]['Twins']), 'Flux': FluxDistribution.loc[i, 'FluxValues']}})
IsoReaction2ProtoReaction = {}
for i in ReactionMap.keys():
for j in ReactionMap[i]['ModelReactions']:
IsoReaction2ProtoReaction[j] = i
EnzymeMap = {}
for i in self.ModelStructure.EnzymeInfo.Elements.keys():
if self.ModelStructure.EnzymeInfo.Elements[i]['Reaction'] in IsoReaction2ProtoReaction:
CompositionDict = {self.ModelStructure.ProteinInfo.Elements[j]['ProtoID']: self.ModelStructure.EnzymeInfo.Elements[
i]['Subunits'][j] for j in self.ModelStructure.EnzymeInfo.Elements[i]['Subunits'].keys()}
ProtoReaction = IsoReaction2ProtoReaction[self.ModelStructure.EnzymeInfo.Elements[i]['Reaction']]
CopyNumbers = []
Stoichiometries = []
EnzymeNumbers = []
for j in CompositionDict.keys():
if j in ProtoIDmap.keys():
CopyNumbers.append(ProtoIDmap[j]['CopyNumber'])
Stoichiometries.append(CompositionDict[j])
EnzymeNumbers.append(ProtoIDmap[j]['CopyNumber']/CompositionDict[j])
GM_enzymenumber = 0
if len(EnzymeNumbers) > 0:
GM_enzymenumber = gmean(numpy.array(EnzymeNumbers))
EnzymeMap.update(
{i: {'ProtoReaction': ProtoReaction, 'EnzymeNumber': GM_enzymenumber}})
EnzymeMap2 = {}
for i in ReactionMap.keys():
totalIsoEnzymeNumber = 0
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
totalIsoEnzymeNumber += EnzymeMap[respectiveEnzyme]['EnzymeNumber']
for j in ReactionMap[i]['ModelReactions']:
respectiveEnzyme = self.ModelStructure.ReactionInfo.Elements[j]['Enzyme']
if respectiveEnzyme in EnzymeMap.keys():
concentration = EnzymeMap[respectiveEnzyme]['EnzymeNumber']
if concentration != 0:
if numpy.isfinite(concentration):
specificFlux = ReactionMap[i]['Flux'] * \
EnzymeMap[respectiveEnzyme]['EnzymeNumber']/totalIsoEnzymeNumber
EnzymeMap2.update({respectiveEnzyme: {'CopyNumber': EnzymeMap[respectiveEnzyme]['EnzymeNumber'],
'Concentration': concentration, 'Flux': specificFlux, 'Kapp': abs(specificFlux/concentration)}})
self.model = old_model
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
for i in EnzymeMap2.keys():
# if EnzymeMap2[i]['CopyNumber'] == 0:
# continue
out.loc[i, 'Enzyme_ID'] = i
out.loc[i, 'CopyNumber'] = EnzymeMap2[i]['CopyNumber']
out.loc[i, 'Concentration'] = EnzymeMap2[i]['Concentration']
out.loc[i, 'Flux'] = EnzymeMap2[i]['Flux']
out.loc[i, 'Kapp'] = EnzymeMap2[i]['Kapp']
return(out)
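    # Added illustrative sketch (not part of the original workflow): estimate_specific_Kapps
    # expects a proteomics table with columns 'ID' and 'copy_number' and a flux-bound table
    # with columns 'Reaction_ID', 'LB' and 'UB'; 'simulation' stands for an instance of this
    # class and the file names are placeholders.
    #
    #   proteomics = pandas.read_csv('proteome.csv')      # columns: ID, copy_number
    #   bounds = pandas.read_csv('flux_bounds.csv')       # columns: Reaction_ID, LB, UB
    #   kapps = simulation.estimate_specific_Kapps(proteomicsData=proteomics, flux_bounds=bounds,
    #                                              mu=0.25, target_biomass_function=True,
    #                                              parsimonious_fba=True)
    #   kapps.to_csv('specific_kapps.csv')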
def estimate_default_Kapps(self, target_mu, compartment_densities_and_PGs=None, flux_bounds=None, plateau_limit=4, mu_approximation_precision=0.005, transporter_to_lumen_coefficient=10, default_kapp_LB=0, default_kapp_UB=1000000, start_val=200000, densities_to_fix=None, eukaryotic=False):
"""
Parameters
----------
target_mu : float
compartment_densities_and_PGs : pandas.DataFrame
        flux_bounds : pandas.DataFrame
        plateau_limit : int
        mu_approximation_precision : float
        transporter_to_lumen_coefficient : float
        default_kapp_LB : float
        default_kapp_UB : float
        start_val : float
        densities_to_fix : list
        eukaryotic : bool
        """
orig_enz = self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value
out = pandas.DataFrame()
for comp in list(compartment_densities_and_PGs['Compartment_ID']):
self.model.parameters.functions._elements_by_id[str(
'fraction_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'Density']
self.model.parameters.functions._elements_by_id[str(
'fraction_non_enzymatic_protein_'+comp)].parameters._elements_by_id['CONSTANT'].value = compartment_densities_and_PGs.loc[compartment_densities_and_PGs['Compartment_ID'] == comp, 'PG_fraction']
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
rxn_LBs = {}
rxn_UBs = {}
for rx in flux_bounds['Reaction_ID']:
lb = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'LB'].values[0]
ub = flux_bounds.loc[flux_bounds['Reaction_ID'] == rx, 'UB'].values[0]
if not pandas.isna(lb):
rxn_LBs.update({rx: lb})
if not pandas.isna(ub):
rxn_UBs.update({rx: ub})
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
kapp_LB = default_kapp_LB
if default_kapp_UB is not None:
kapp_UB = default_kapp_UB
else:
kapp_UB = orig_enz*1000
# new_kapp = (kapp_UB+kapp_LB)/2
if start_val is not None:
new_kapp = start_val
else:
new_kapp = orig_enz
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus = []
Mus_Error = []
Kapps = []
last_Mu = numpy.nan
plateau_count = 0
if abs(target_mu - Mu_pred) > mu_approximation_precision:
while abs(target_mu - Mu_pred) > mu_approximation_precision:
if plateau_count >= plateau_limit:
break
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = new_kapp
self.model.parameters.functions._elements_by_id['default_transporter_efficiency'].parameters._elements_by_id[
'CONSTANT'].value = transporter_to_lumen_coefficient*new_kapp
self.rebuild_from_model()
self.addExchangeReactions()
self.setMedium(self.Medium)
self.Problem.setLB(rxn_LBs)
self.Problem.setUB(rxn_UBs)
if densities_to_fix is None:
comp_density_rows = list(self.Problem.CompartmentDensities)
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
else:
if len(densities_to_fix) != 0:
comp_density_rows = densities_to_fix
self.Problem.setConstraintType(
dict(zip(comp_density_rows, ['E']*len(comp_density_rows))))
Mu_pred = self.findMaxGrowthRate(precision=0.005, max=1)
Mus_Error.append(abs(target_mu - Mu_pred))
Mus.append(Mu_pred)
Kapps.append(new_kapp)
if Mu_pred > target_mu:
new_kapp_prelim = kapp_LB+(0.5*abs(kapp_LB-new_kapp))
kapp_UB = new_kapp
elif Mu_pred < target_mu:
new_kapp_prelim = kapp_UB-(0.5*abs(new_kapp-kapp_UB))
kapp_LB = new_kapp
new_kapp = new_kapp_prelim
if len(Mus) > 2:
if Mus[-2] == Mu_pred:
plateau_count += 1
else:
plateau_count = 0
else:
Mus.append(Mu_pred)
Mus_Error.append(abs(target_mu - Mu_pred))
Kapps.append(
self.model.parameters.functions._elements_by_id['default_efficiency'].parameters._elements_by_id['CONSTANT'].value)
self.rebuild_from_model()
self.setMedium(self.Medium)
out = pandas.DataFrame()
out['Mu'] = Mus
out['delta_Mu'] = Mus_Error
out['default_efficiency'] = Kapps
out['default_transporter_efficiency'] = [transporter_to_lumen_coefficient*i for i in Kapps]
return(out)
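    # Added illustrative sketch (assumption): the compartment table must provide the columns
    # 'Compartment_ID', 'Density' and 'PG_fraction' used above; 'simulation' denotes an instance
    # of this class and the file names are placeholders.
    #
    #   comp_df = pandas.read_csv('compartment_densities_and_PGs.csv')
    #   bounds = pandas.read_csv('flux_bounds.csv')       # columns: Reaction_ID, LB, UB
    #   scan = simulation.estimate_default_Kapps(target_mu=0.25,
    #                                            compartment_densities_and_PGs=comp_df,
    #                                            flux_bounds=bounds, start_val=200000)
    #   default_kapp = scan['default_efficiency'].iloc[-1]    # last value tried in the search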
def inject_default_kapps(self, default_kapp, default_transporter_kapp):
if numpy.isfinite(default_kapp):
self.model.parameters.functions._elements_by_id[
'default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapp
if numpy.isfinite(default_transporter_kapp):
self.model.parameters.functions._elements_by_id[
'default_transporter_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_transporter_kapp
self.rebuild_from_model()
def inject_process_capacities(self, process_efficiencies):
"""
Parameters
----------
process_efficiencies : pandas.DataFrame(columns=['Process','Parameter','Value'])
"""
for i in process_efficiencies.index:
if numpy.isfinite(process_efficiencies.loc[i, 'Value']):
if process_efficiencies.loc[i, 'Process'] in self.model.processes.processes._elements_by_id.keys():
if not pandas.isna(process_efficiencies.loc[i, 'Value']):
self.model.processes.processes._elements_by_id[process_efficiencies.loc[i,
'Process']].machinery.capacity.value = process_efficiencies.loc[i, 'Parameter']
const = rba.xml.parameters.Function(process_efficiencies.loc[i, 'Parameter'], 'constant', parameters={
'CONSTANT': process_efficiencies.loc[i, 'Value']}, variable=None)
if process_efficiencies.loc[i, 'Parameter'] not in self.model.parameters.functions._elements_by_id.keys():
self.model.parameters.functions.append(const)
else:
self.model.parameters.functions._elements_by_id[const.id].parameters._elements_by_id[
'CONSTANT'].value = process_efficiencies.loc[i, 'Value']
self.rebuild_from_model()
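    # Added illustrative sketch (assumption; process and parameter names are placeholders):
    # each row pairs a process with the parameter ID to create/overwrite and the constant value.
    #
    #   process_efficiencies = pandas.DataFrame({'Process': ['P_TA'],
    #                                            'Parameter': ['P_TA_capacity__constant'],
    #                                            'Value': [0.05]})
    #   simulation.inject_process_capacities(process_efficiencies)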
def inject_specific_kapps(self, specific_kapps, round_to_digits=0):
"""
Parameters
----------
        specific_kapps : pandas.DataFrame (columns=['Enzyme_ID', 'Kapp'])
"""
parameterized = []
if 'Enzyme_ID' in list(specific_kapps.columns):
for enz in list(specific_kapps['Enzyme_ID']):
if not pandas.isna(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if numpy.isfinite(specific_kapps.loc[specific_kapps['Enzyme_ID'] == enz, 'Kapp'].values[0]):
if enz not in parameterized:
all_enzs = self.ModelStructure.EnzymeInfo.Elements[enz]['Isozymes']
all_enzs.append(enz)
parameterized += all_enzs
if len(all_enzs) == 1:
proto_enz = all_enzs[0]
else:
proto_enz = [i for i in all_enzs if not '_duplicate_' in i][0]
val = round(specific_kapps.loc[specific_kapps['Enzyme_ID']
== enz, 'Kapp'].values[0], round_to_digits)
const = rba.xml.parameters.Function(
str(proto_enz + '_kapp__constant'), 'constant', parameters={'CONSTANT': val}, variable=None)
if str(proto_enz + '_kapp__constant') not in self.model.parameters.functions._elements_by_id.keys():
self.model.parameters.functions.append(const)
else:
# self.model.parameters.functions._elements_by_id[const.id] = const
self.model.parameters.functions._elements_by_id[
const.id].parameters._elements_by_id['CONSTANT'].value = val
count = 0
# self.model.parameters.functions._elements_by_id['default_efficiency'].parameters._elements_by_id['CONSTANT'].value = default_kapp
for e in self.model.enzymes.enzymes:
if e.id in all_enzs:
count += 1
e.forward_efficiency = str(proto_enz + '_kapp__constant')
e.backward_efficiency = str(proto_enz + '_kapp__constant')
if count == len(all_enzs):
break
self.rebuild_from_model()
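    # Added illustrative sketch (assumption): specific_kapps is typically the output of
    # estimate_specific_Kapps above (columns 'Enzyme_ID' and 'Kapp'); forward and backward
    # efficiencies of an enzyme and its isozymes are all set to the same constant.
    #
    #   specific_kapps = simulation.estimate_specific_Kapps(proteomics, bounds, mu=0.25)
    #   simulation.inject_specific_kapps(specific_kapps, round_to_digits=2)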
def get_parameter_definition(self, parameter):
if parameter in self.model.parameters.functions._elements_by_id.keys():
function = self.model.parameters.functions._elements_by_id[parameter]
expression = parse_function(function)
elif parameter in self.model.parameters.aggregates._elements_by_id.keys():
function_id_list = get_function_list_of_aggregate(
aggregate=self.model.parameters.aggregates._elements_by_id[parameter])
expression = parse_aggregate(aggregate=self.model.parameters.aggregates._elements_by_id[parameter], function_list=[
self.model.parameters.functions._elements_by_id[f_id] for f_id in function_id_list])
else:
return({})
return(expression)
def get_parameter_value(self, parameter):
if parameter in self.model.parameters.functions._elements_by_id.keys():
function = self.model.parameters.functions._elements_by_id[parameter]
expression = parse_function_with_parameter_values(function)
elif parameter in self.model.parameters.aggregates._elements_by_id.keys():
function_id_list = get_function_list_of_aggregate(
aggregate=self.model.parameters.aggregates._elements_by_id[parameter])
expression = parse_aggregate_with_parameter_values(aggregate=self.model.parameters.aggregates._elements_by_id[parameter], function_list=[
self.model.parameters.functions._elements_by_id[f_id] for f_id in function_id_list])
else:
return({parameter: numpy.nan})
variable_values = {}
for v in expression[parameter]['Variables']:
if v == 'growth_rate':
variable_values[v] = self.Mu
elif v in self.Medium.keys():
variable_values[v] = self.Medium[v]
elif v.endswith('_e'):
if v[:-2] in self.Medium.keys():
variable_values[v] = self.Medium[v[:-2]]
else:
variable_values = {}
return({parameter: numpy.nan})
result = evaluate_expression(expression_dictionary=expression,
variable_values=variable_values)
return(result)
def get_parameter_values(self, parameter_type, species=None, output_format='dict'):
if parameter_type == 'medium_composition':
if species is None:
results = self.Medium
elif type(species) is str:
results = {species: self.Medium[species]}
elif type(species) is list:
results = {sp: self.Medium[sp] for sp in species}
elif parameter_type == 'machine_efficiencies':
if species is None:
parameter_names = {process_name: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[
process_name]['ID']].machinery.capacity.value for process_name in self.ModelStructure.ProcessInfo.Elements.keys()}
elif type(species) is str:
parameter_names = {
species: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[species]['ID']].machinery.capacity.value}
elif type(species) is list:
parameter_names = {
sp: self.model.processes.processes._elements_by_id[self.ModelStructure.ProcessInfo.Elements[sp]['ID']].machinery.capacity.value for sp in species}
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]) for pn in parameter_names}
elif parameter_type == 'enzyme_efficiencies' or parameter_type == 'enzyme_efficiencies_forward' or parameter_type == 'enzyme_efficiencies_backward':
if species is None:
parameter_names = {enzyme_name: {'Forward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].forward_efficiency, 'Backward': self.model.enzymes.enzymes._elements_by_id[
enzyme_name].backward_efficiency} for enzyme_name in self.ModelStructure.EnzymeInfo.Elements.keys()}
elif type(species) is str:
parameter_names = {species: {'Forward': self.model.enzymes.enzymes._elements_by_id[
species].forward_efficiency, 'Backward': self.model.enzymes.enzymes._elements_by_id[species].backward_efficiency}}
elif type(species) is list:
parameter_names = {enzyme_name: {'Forward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].forward_efficiency,
'Backward': self.model.enzymes.enzymes._elements_by_id[enzyme_name].backward_efficiency} for enzyme_name in species}
if parameter_type == 'enzyme_efficiencies':
results = {pn: {'Forward': self.get_parameter_value(parameter=parameter_names[pn]['Forward']), 'Backward': self.get_parameter_value(
parameter=parameter_names[pn]['Backward'])} for pn in parameter_names.keys()}
elif parameter_type == 'enzyme_efficiencies_forward':
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]['Forward']) for pn in parameter_names.keys()}
elif parameter_type == 'enzyme_efficiencies_backward':
results = {pn: self.get_parameter_value(
parameter=parameter_names[pn]['Backward']) for pn in parameter_names.keys()}
elif parameter_type == 'maximal_densities':
density_dict = {i.compartment: self.get_parameter_value(
parameter=i.upper_bound) for i in self.model.density.target_densities}
if species is None:
results = density_dict
elif type(species) is str:
results = {species: density_dict[species]
for sp in [species] if sp in density_dict.keys()}
elif type(species) is list:
results = {sp: density_dict[sp] for sp in species if sp in density_dict.keys()}
elif parameter_type == 'target_values':
target_dict = {self.ModelStructure.TargetInfo.Elements[target_ID]['TargetEntity']: {'Target_id': target_ID, 'Target_value': self.get_parameter_value(
parameter=self.ModelStructure.TargetInfo.Elements[target_ID]['TargetValue'])} for target_ID in self.ModelStructure.TargetInfo.Elements.keys()}
if species is None:
results = target_dict
elif type(species) is str:
results = {species: target_dict[species]
for sp in [species] if sp in target_dict.keys()}
elif type(species) is list:
results = {sp: target_dict[sp] for sp in species if sp in target_dict.keys()}
if output_format == 'dict':
return(results)
if output_format == 'json':
return(json.dumps(results))
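    # Added illustrative sketch (assumption; the enzyme ID is a placeholder): supported
    # parameter_type values are 'medium_composition', 'machine_efficiencies',
    # 'enzyme_efficiencies' (or the '_forward'/'_backward' variants), 'maximal_densities'
    # and 'target_values'.
    #
    #   simulation.get_parameter_values('medium_composition')
    #   simulation.get_parameter_values('enzyme_efficiencies_forward', species=['R_PGK_enzyme'])
    #   simulation.get_parameter_values('maximal_densities', output_format='json')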
def get_parameter_value_from_model(function, parameter_ID):
return(function.parameters._elements_by_id[parameter_ID].value)
def make_paramter_function_specific(function_ID, parameter, return_normal=False):
if return_normal:
return(str(parameter))
else:
return(str('{}__parameter__{}'.format(function_ID, parameter)))
def parse_function(function):
independent_variable = function.variable
function_ID = function.id
if function.type == 'constant':
eq = make_paramter_function_specific(
function_ID=function_ID, parameter='CONSTANT', return_normal=True)
latex_string = str(make_paramter_function_specific(
function_ID=function_ID, parameter='CONSTANT', return_normal=True))
function_parameter_values = {'CONSTANT': get_parameter_value_from_model(
function=function, parameter_ID='CONSTANT')}
elif function.type == 'exponential':
eq = 'e**({}*{})'.format(make_paramter_function_specific(function_ID=function_ID,
parameter='RATE', return_normal=True), str(independent_variable))
latex_string = str('e^{'+str(make_paramter_function_specific(function_ID=function_ID,
parameter='RATE', return_normal=True)) + ' '+str(independent_variable)+'}')
function_parameter_values = {'RATE': get_parameter_value_from_model(
function=function, parameter_ID='RATE')}
elif function.type == 'linear':
eq = str('{}+{}*{}'.format(make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_CONSTANT', return_normal=True),
make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_COEF', return_normal=True), str(independent_variable)))
latex_string = str(make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_CONSTANT', return_normal=True) +
make_paramter_function_specific(function_ID=function_ID, parameter='LINEAR_COEF', return_normal=True)+' '+str(independent_variable))
function_parameter_values = {'LINEAR_CONSTANT': get_parameter_value_from_model(function=function, parameter_ID='LINEAR_CONSTANT'),
'LINEAR_COEF': get_parameter_value_from_model(function=function, parameter_ID='LINEAR_COEF'),
'X_MIN': get_parameter_value_from_model(function=function, parameter_ID='X_MIN'),
'X_MAX': get_parameter_value_from_model(function=function, parameter_ID='X_MAX'),
'Y_MIN': get_parameter_value_from_model(function=function, parameter_ID='Y_MIN'),
'Y_MAX': get_parameter_value_from_model(function=function, parameter_ID='Y_MAX'), }
elif function.type == 'michaelisMenten':
eq = str('{}*{}/({}+{})'.format(make_paramter_function_specific(function_ID=function_ID, parameter='kmax', return_normal=True),
str(independent_variable), str(independent_variable), make_paramter_function_specific(function_ID=function_ID, parameter='Km', return_normal=True)))
function_parameter_values = {'kmax': get_parameter_value_from_model(function=function, parameter_ID='kmax'),
'Km': get_parameter_value_from_model(function=function, parameter_ID='Km'),
'Y_MIN': get_parameter_value_from_model(function=function, parameter_ID='Y_MIN')}
return({function_ID: {'Type': function.type, 'Equation': eq, 'Variables': [str(independent_variable)], 'Function_parameters': function_parameter_values}})
def parse_function_with_parameter_values(function):
independent_variable = function.variable
function_ID = function.id
if function.type == 'constant':
return({function_ID: {'Equation': '{}'.format(str(get_parameter_value_from_model(function=function, parameter_ID='CONSTANT'))), 'Variables': []}})
elif function.type == 'exponential':
return({function_ID: {'Equation': '{}**({}*{})'.format(str(numpy.e), str(get_parameter_value_from_model(function=function, parameter_ID='RATE')), str(independent_variable)), 'Variables': [str(independent_variable)]}})
elif function.type == 'linear':
return({function_ID: {'Equation': str('{}+{}*{}'.format(str(get_parameter_value_from_model(function=function, parameter_ID='LINEAR_CONSTANT')), str(get_parameter_value_from_model(function=function, parameter_ID='LINEAR_COEF')), str(independent_variable))), 'Variables': [str(independent_variable)]}})
elif function.type == 'michaelisMenten':
return({function_ID: {'Equation': str('{}*{}/({}+{})'.format(str(get_parameter_value_from_model(function=function, parameter_ID='kmax')), str(independent_variable), str(get_parameter_value_from_model(function=function, parameter_ID='Km')), str(independent_variable))), 'Variables': [str(independent_variable)]}})
def get_parameter_of_function(function, parameter):
return(function.parameters._elements_by_id[parameter])
def join_functions_multiplicatively(parsed_function_list):
term_list = []
variable_list = []
for function in parsed_function_list:
function_ID = list(function.keys())[0]
term_list.append(str('('+function[function_ID]['Equation']+')'))
variable_list += function[function_ID]['Variables']
return({'Type': 'Aggregate', 'Equation': '*'.join(term_list), 'Variables': list(set(variable_list))})
def get_function_list_of_aggregate(aggregate):
return([agg.function for agg in aggregate.function_references._elements])
def parse_aggregate_with_parameter_values(aggregate, function_list):
aggregate_ID = aggregate.id
if aggregate.type == 'multiplication':
parsed_function_list = [parse_function_with_parameter_values(
function) for function in function_list]
return({aggregate_ID: join_functions_multiplicatively(parsed_function_list=parsed_function_list)})
else:
return({aggregate_ID: {'Equation': '', 'Variables': []}})
def parse_aggregate(aggregate, function_list):
aggregate_ID = aggregate.id
if aggregate.type == 'multiplication':
parsed_function_list = [parse_function(function) for function in function_list]
result = {aggregate_ID: join_functions_multiplicatively(
parsed_function_list=parsed_function_list)}
result[aggregate_ID]['Multiplicative Terms'] = [f.id for f in function_list]
return(result)
else:
return({aggregate_ID: {'Type': 'Aggregate', 'Equation': '', 'Variables': [], 'Multiplicative Terms': []}})
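# Added illustrative sketch (assumption; IDs are placeholders): for a multiplicative aggregate
# built from two parsed functions, parse_aggregate returns a nested dict of roughly this shape:
#
#   {'my_aggregate': {'Type': 'Aggregate',
#                     'Equation': '(<equation of f_1>)*(<equation of f_2>)',
#                     'Variables': [<independent variables of f_1 and f_2>],
#                     'Multiplicative Terms': ['f_1', 'f_2']}}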
# def transform_to_latex(equation):
#
def MediumDependentCoefficients_A(Controler):
out = {}
MedDepRxns = [list(i.keys()) for i in list(Controler.ExchangeMap.values())]
MedDepRxnsFlatted = list(set([item for sublist in MedDepRxns for item in sublist]))
for i in Controler.ModelStructure.EnzymeConstraintsInfo.Elements.keys():
if Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction'] in MedDepRxnsFlatted:
nonConst = False
for j in Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['CapacityParameter']:
if list(j.values())[0]['FunctionType'] != 'constant':
nonConst = True
if nonConst:
if Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction'] in list(out.keys()):
out[Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]
['AssociatedReaction']].append(i)
else:
out.update(
{Controler.ModelStructure.EnzymeConstraintsInfo.Elements[i]['AssociatedReaction']: [i]})
return([(out[i][0], Controler.ModelStructure.ReactionInfo.Elements[i]['Enzyme'])for i in out.keys()])
def QualitativeMediumChange(Controller, changes, species):
QualitativeMediumChange = False
if float(Controller.Medium[species]) == float(0):
if float(changes[species]) != float(0):
boundValue = 1000.0
QualitativeMediumChange = True
else:
return([QualitativeMediumChange])
if float(Controller.Medium[species]) != float(0):
if float(changes[species]) == float(0):
boundValue = 0.0
QualitativeMediumChange = True
else:
return([QualitativeMediumChange])
return([QualitativeMediumChange, float(boundValue)])
def ProtoProteomeRecording(Controller, run, Proteinlevels):
out = {}
for i in list(Controller.ModelStructure.ProteinGeneMatrix['ProtoProteins']):
row_ind = list(Controller.ModelStructure.ProteinGeneMatrix['ProtoProteins']).index(i)
nonZero = list(numpy.nonzero(
Controller.ModelStructure.ProteinGeneMatrix['Matrix'][row_ind, :])[0])
level = 0
for j in nonZero:
id = Controller.ModelStructure.ProteinGeneMatrix['Proteins'][j]
level += Proteinlevels.loc[id, run]
out.update({i: level})
return(out)
def ProteomeRecording(Controller, run):
EnzDF = pandas.DataFrame(index=Controller.Problem.Enzymes)
PrcDF = | pandas.DataFrame(index=Controller.Problem.Processes) | pandas.DataFrame |
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, sys, time, datetime, pathlib, random, math
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as tvtransforms
from skimage import io, transform
# HELPER FUNCTION
def _check_if_array_3D(source_image, boneless_image=None):
# Check if array is 3D or 2D
iters = 0
img_list = [source_image, boneless_image]
for image in img_list:
if image is not None:
if image.ndim == 3:
# make the image grayscale
image = image[:,:,0]
iters+=1
if iters == 1:
source_image = image
if iters == 2:
boneless_image = image
if boneless_image is None:
return source_image
else:
return source_image, boneless_image
###########################
# JSRT CXR dataset
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.: Development of a digital image database for chest radiographs with and without a lung nodule: Receiver operating characteristic analysis of radiologists’ detection of pulmonary nodules. AJR 174; 71-74, 2000
###########################
class JSRT_CXR(Dataset):
def __init__(self, data_normal, data_BSE, transform):
"""
Inputs:
data_normal: root directory holding the normal / non-suppressed images
data_BSE: root directory holding the bone-suppressed images
transform: (optional) a torchvision.transforms.Compose series of transformations
Assumed that files corresponding to the same patient have the same name in both folders data_normal and data_BSE.
"""
if data_BSE is not None:
sample = {"Patient": [], "boneless":[], "source":[]}
else:
sample = {"Patient": [], "source":[]}
for root, dirs, files in os.walk(data_normal):
for name in files:
if '.png' in name:
a_filepath = os.path.join(root, name)
# Patient code
head, tail = os.path.split(a_filepath)
patient_code_file = os.path.splitext(tail)[0]
# Place into lists
sample["Patient"].append(patient_code_file)
sample["source"].append(a_filepath)
# For each patient code, search the alternate data_folder to obtain the corresponding source
if data_BSE is not None:
for root2, dirs2, files2 in os.walk(data_BSE):
for name2 in files2:
                                # Compare full file stems so that e.g. 0_1 and 0_10 are not confused
filename2,_ = os.path.splitext(name2)
if patient_code_file == filename2:
sample["boneless"].append(os.path.join(root2, name2))
self.data = pd.DataFrame(sample)
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
"""Describe the reading of images in here"""
if torch.is_tensor(idx):
idx = idx.tolist() # transform into python list
patient_code = self.data["Patient"].iloc[idx]
source_image = plt.imread(self.data["source"].iloc[idx])
boneless_image = plt.imread(self.data["boneless"].iloc[idx])
source_image, boneless_image = _check_if_array_3D(source_image, boneless_image)
sample = {'source': source_image, 'boneless': boneless_image} #'patientCode': patient_code
if self.transform:
sample = self.transform(sample)
return sample
def visualise(self, idx):
bonelessIm = plt.imread(self.data["boneless"].iloc[idx])
sourceIm = plt.imread(self.data["source"].iloc[idx])
sourceIm, bonelessIm = _check_if_array_3D( sourceIm, bonelessIm)
# Visualisation
fig, ax=plt.subplots(1,2)
ax[0].imshow(sourceIm, cmap="gray")
ax[1].imshow(bonelessIm, cmap="gray")
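# Added illustrative usage sketch (assumption; directory paths are placeholders):
#
#   dataset = JSRT_CXR(data_normal='JSRT/normal', data_BSE='JSRT/bse', transform=None)
#   loader = DataLoader(dataset, batch_size=4, shuffle=True)
#   batch = next(iter(loader))    # dict with 'source' and 'boneless' image batches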
class POLYU_COVID19_CXR_CT_Cohort1(Dataset):
def __init__(self, data_normal, transform):
"""
Inputs:
data_normal: root directory holding the normal / non-suppressed images
transform: (optional) a torchvision.transforms.Compose series of transformations
        Only the directory holding the normal (non-suppressed) images is required for this dataset.
"""
sample = {"Patient": [], "source":[]}
for root, dirs, files in os.walk(data_normal):
for name in files:
if '.png' in name:
a_filepath = os.path.join(root, name)
# Patient code
head, tail = os.path.split(a_filepath)
patient_code_file = os.path.splitext(tail)[0]
# Place into lists
sample["Patient"].append(patient_code_file)
sample["source"].append(a_filepath)
self.data = | pd.DataFrame(sample) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[18]:
#Question 1
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from kmodes.kmodes import KModes as km
import seaborn as sns
import sklearn.cluster as cl
from sklearn.neighbors import NearestNeighbors as NN
import math
import numpy.linalg as linalg
import sklearn.neighbors as neighbors
import scipy
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
file1 = "Groceries.csv"
df = pd.read_csv(file1)
item_group = df.groupby(['Customer'])['Item'].apply(list).values.tolist()
te = TransactionEncoder()
te_ary = te.fit(item_group).transform(item_group)
ItemIndicator = pd.DataFrame(te_ary, columns=te.columns_)
#a
df2 = pd.DataFrame(item_group)
data = []
for i in range(len(item_group)):
data.append(len(item_group[i]))
customer = [i for i in range(1,len(data)+1)]
#DataFrame containing unique number of items in each customer's Basket
item_df = pd.DataFrame(data=data,columns=['No of items'])
item_df['Customer']=customer
plt.figure(figsize=(10,10))
sns.distplot(item_df['No of items'])
plt.show()
#Percentiles of histogram
LQ,Median,UQ = np.percentile(item_df['No of items'],[25,50,75])
print("25th Percentile of Histogram:",LQ)
print("50th Percentile of Histogram:",Median)
print("75th Percentile of Histogram:",UQ)
#b
data2 = []
for j in range(len(item_group)):
data2.append(item_group[j])
df3 = pd.DataFrame(data=customer,columns=['Customer'])
minSup = 75/len(df3)
#Apriori Algorithm
frequent_itemsets = apriori(ItemIndicator, min_support = minSup, max_len = Median, use_colnames = True)
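# Added note: with use_colnames=True, apriori returns a DataFrame with two columns,
# 'support' (fraction of baskets containing the itemset) and 'itemsets' (a frozenset of
# item names), which is what association_rules below consumes.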
#itemsets with at least 75 customers
total_itemsets = len(frequent_itemsets)
print("Total number of itemsets with at least 75 customers",total_itemsets)
#largest k-value
count_k = []
k_itemset = [count_k.append(len(frequent_itemsets.iloc[i,1])) for i in range(len(frequent_itemsets))]
max_k_itemset = max(count_k)
print("The largest k-value among the itemsets",max_k_itemset)
#c
#Association Rules
assoc_rules = association_rules(frequent_itemsets, metric = "confidence", min_threshold = 0.01 )
#dropping na values to make sure every rule has antecedents and consequents
assoc_rules = assoc_rules.dropna()
total_assoc_rules = len(assoc_rules)
print("Total number of Association Rules with > 1% Confidence ",total_assoc_rules)
assoc_rules.columns
#d
print("Support Vs Confidence")
plt.figure(figsize=(10,10))
sns.scatterplot(data=assoc_rules,x="confidence",y="support",size="lift")
plt.show()
#e
assoc_rules_60 = association_rules(frequent_itemsets, metric = "confidence", min_threshold = 0.6 )
print("Association rules with confidence >= 60% \n",assoc_rules_60.loc[:,['antecedents','consequents','confidence','support','lift']])
#Question 2
file2 = "cars.csv"
df4 = pd.read_csv(file2)
cars_df = df4.loc[:,['Type','Origin','DriveTrain','Cylinders']]
#a
labels_type = list(cars_df['Type'].value_counts().index)
List_Of_Categories_In_Type = list(cars_df['Type'].value_counts())
print(" Frequencies for the categorical feature ‘Type’")
type_freq = pd.DataFrame(list(zip(labels_type,List_Of_Categories_In_Type)),columns=['Type','Count'])
print(type_freq)
#b
print("Frequencies for the categorical feature ‘Drive Train’")
labels_drivetrain = list(cars_df['DriveTrain'].value_counts().index)
List_Of_Categories_In_DriveTrain = list(cars_df['DriveTrain'].value_counts())
drivetrain_freq = pd.DataFrame(list(zip(labels_drivetrain,List_Of_Categories_In_DriveTrain)),columns=['DriveTrain','Count'])
print(drivetrain_freq)
#c
print("the distance between Origin = ‘Asia’ and Origin = ‘Europe’")
labels_origin = list(cars_df['Origin'].value_counts().index)
List_Of_Categories_In_Origin = list(cars_df['Origin'].value_counts())
origin_df = pd.DataFrame(List_Of_Categories_In_Origin)
origin = origin_df.T
o = origin.values.tolist()
ori_asia = cars_df.Origin.value_counts()['Asia']
ori_eur = cars_df.Origin.value_counts()['Europe']
#dis_origin_df = pd.DataFrame(o,columns=labels_origin)
dis_origin = (1/ori_asia)+(1/ori_eur)
print(dis_origin)
#d
print("the distance between Cylinders = 5 and Cylinders = Missing")
List_Of_Categories_In_Cylinders = list(cars_df['Cylinders'].value_counts())
labels_Cylinders = list(cars_df['Cylinders'].value_counts().index)
cylinders_df = pd.DataFrame(List_Of_Categories_In_Cylinders)
cylinder = cylinders_df.T
c = cylinder.values.tolist()
cyc_5 = cars_df.Cylinders.value_counts()[5.0]
cyc_nan = cars_df.Cylinders.value_counts(dropna=False)[np.NaN]
dis_cylinder_df = | pd.DataFrame(c,columns=labels_Cylinders) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import pandas as pd
import os
import wiggum as wg
import numpy as np
import pytest
def test_basic_load_df_wages():
# We'll first load in some data, this has both regression and rate type trends. We will load it two ways and check that the structure is the same
# In[2]:
labeled_df_file = wg.LabeledDataFrame('data/wages_gender_rank_time_regression2/df.csv')
# In[3]:
labeled_df_dir = wg.LabeledDataFrame('data/wages_gender_rank_time_regression2')
# In[4]:
assert np.product(labeled_df_file.df.columns == labeled_df_dir.df.columns)
# In[5]:
assert labeled_df_file.df.shape == labeled_df_dir.df.shape
# In[6]:
compare_df = labeled_df_file.df == labeled_df_dir.df
assert np.product(compare_df.sum() == len(labeled_df_file.df))
# Next, we can infer the variable types and assign the roles then check that those match what was read from the saved copy
# In[7]:
labeled_df_file.infer_var_types()
roles = {'department':['independent','splitby'], 'year':['independent'],
'pay':['dependent'], 'gender':['independent','splitby']}
var_types = {'gender':'categorical'}
labeled_df_file.set_counts({var:False for var in labeled_df_file.df.columns})
labeled_df_file.set_roles(roles)
labeled_df_file.set_var_types(var_types)
assert np.product(labeled_df_file.meta_df.columns == labeled_df_dir.meta_df.columns)
assert labeled_df_file.meta_df.shape == labeled_df_dir.meta_df.shape
compare_meta_df = labeled_df_file.meta_df.dropna(axis=1) == labeled_df_dir.meta_df.dropna(axis=1)
assert np.product(compare_meta_df.sum() == len(labeled_df_dir.meta_df))
# compare_meta_df
# labeled_df_dir.meta_df.dropna(axis=1)
# Now, we've set this up, we can also save these configurations to load them in directly in the future
assert labeled_df_file.to_csvs('data/wages_test')
# Now confirm that all the files were written correctly.
assert sorted(os.listdir('data/wages_test/')) == ['df.csv', 'meta.csv', 'result_df.csv']
# it write the three DataFrames each out to their own .csv file in that directory. If that directory exists it will overwrite without warning, if not, also creates the directory.
#
# Now, we can can also load the data back
labeled_df = wg.LabeledDataFrame('data/wages_test')
labeled_df.meta_df
# And confirm that thiss is the same as what was written. First confirm the column headings are the same
assert np.product(labeled_df.meta_df.columns == labeled_df_dir.meta_df.columns)
# Then confirm the shape is the same
assert labeled_df.meta_df.shape == labeled_df_dir.meta_df.shape
# Then that non NaN values are all the same, combined with above the NaNs must be in the same location, but np.NaN == np.Nan asserts to false
# In[18]:
compare_meta_df = labeled_df.meta_df.dropna(axis=1) == labeled_df_dir.meta_df.dropna(axis=1)
assert np.product(compare_meta_df.sum() == len(labeled_df_dir.meta_df))
# compare_meta_df
# labeled_df_dir.meta_df.dropna(axis=1)
# In[19]:
assert np.product(labeled_df.df.columns == labeled_df_dir.df.columns)
# In[20]:
assert labeled_df.df.shape == labeled_df_dir.df.shape
# In[21]:
compare_df = labeled_df.df.dropna(axis=1) == labeled_df_dir.df.dropna(axis=1)
assert np.product(compare_df.sum() == len(labeled_df_dir.df))
# compare_meta_df
# labeled_df_dir.meta_df.dropna(axis=1)
# In[22]:
intersect_cols= ['gender','department']
labeled_df.add_intersectional(intersect_cols)
# Now check that that worked correctly
# In[23]:
intersectional_col_name = '_'.join(intersect_cols)
intersectional_correct = lambda row: row[intersectional_col_name] == '_'.join([row[icol] for icol in intersect_cols])
icol_correct = labeled_df.df.apply(intersectional_correct,axis=1)
assert np.product(icol_correct)
# In[24]:
labeled_df.add_quantile(['pay'])
q_limits = np.quantile(labeled_df.df['pay'],[.25,.75,1],)
limits = {n:q for n,q in zip(['low','mid','high'],q_limits)}
for q,df in labeled_df.df.groupby('payquantiles'):
a = df['pay'] <= limits[q]
assert np.product(a)
# In[26]:
assert labeled_df.get_vars_per_type('categorical') == ['department', 'gender', 'gender_department', 'payquantiles']
assert labeled_df.meta_df.loc['gender_department','dtype'] == 'object'
assert labeled_df.meta_df.loc['gender_department','var_type'] == 'categorical'
assert labeled_df.meta_df.loc['gender_department','role'] == 'splitby'
assert labeled_df.meta_df.loc['gender_department','isCount'] == False
# Check the utility fucntions
# In[29]:
assert labeled_df.get_vars_per_role('splitby') == ['department', 'gender', 'gender_department', 'payquantiles']
assert labeled_df.get_vars_per_role('independent') == ['year','department', 'gender']
assert labeled_df.get_vars_per_role('dependent') == ['pay']
# In[30]:
assert labeled_df.get_data_sample() == ['Max: 51.04 Min: 13.52',
'Max: 50.0 Min: 0.0',
'Support, Sales, Management, R&D',
'F, M',
'F_Support, M_Support, M_Sales, F_Sales, M_Management',
'mid, low, high']
# In[31]:
assert labeled_df.get_vars_per_type('categorical') == ['department', 'gender', 'gender_department', 'payquantiles']
assert labeled_df.get_vars_per_type('continuous') == ['pay','year']
# In[32]:
assert labeled_df.get_vars_per_roletype('independent','continuous') == ['year']
assert labeled_df.get_vars_per_roletype('independent','categorical') ==['department', 'gender']
# # Using Trends
#
# Trend objects define their name, how to compute the trend and how to choose which variables,
#
# extension will allow that the var lists may be passed to reduce which ones are computed
# In[33]:
corrobj = wg.All_Pearson()
corrobj.get_trend_vars(labeled_df)
assert corrobj.regression_vars == [('year', 'pay')]
assert len(corrobj.var_weight_list) == len(corrobj.regression_vars)
assert corrobj.set_vars== True
# In[34]:
rankobj = wg.Mean_Rank_Trend()
assert rankobj.get_trend_vars(labeled_df)
assert rankobj.target ==['pay']
assert rankobj.trendgroup == ['department', 'gender']
assert rankobj.set_vars== True
assert len(rankobj.var_weight_list) == len(rankobj.target)
# In[35]:
linreg_obj = wg.All_Linear_Trend()
linreg_obj.get_trend_vars(labeled_df)
assert linreg_obj.regression_vars == [('year', 'pay')]
assert len(linreg_obj.var_weight_list) == len(linreg_obj.regression_vars)
assert linreg_obj.set_vars== True
# # Computing Trends on a LabeledDataFrame
# There are two ways, we can use default setting and pass the names of the trend type or a trend object
# In[36]:
labeled_df.get_subgroup_trends_1lev(['pearson_corr'])
assert np.product(labeled_df.result_df.columns == ['independent', 'dependent', 'splitby', 'subgroup', 'agg_trend',
'agg_trend_strength', 'subgroup_trend', 'subgroup_trend_strength',
'trend_type', 'comparison_type'])
# In[38]:
# there are 10 fixed columns and the number of rows for this trend is below
num_reg_pairs = 1
num_depts = 4
num_genders = 2
num_quantiles = 3
num_dept_genders = num_genders*num_depts
num_pearson = num_reg_pairs*(num_depts+num_genders + num_dept_genders+ num_quantiles )
assert labeled_df.result_df.shape == (num_pearson,10)
# Now we can use a list of objects and apply multiple trends
# In[39]:
labeled_df.get_subgroup_trends_1lev([rankobj,linreg_obj])
num_lin = num_pearson
num_gender_idep = num_depts + num_dept_genders+ num_quantiles
num_dept_indep = num_genders + num_dept_genders+ num_quantiles
num_rank = num_gender_idep + num_dept_indep
total_rows_agg_sg = num_pearson + num_lin + num_rank
assert labeled_df.result_df.shape == (total_rows_agg_sg,10)
# We can see what types of trends were computed from `result_df`
# In[41]:
assert np.product(pd.unique(labeled_df.result_df['trend_type']) ==['pearson_corr', 'rank_trend', 'lin_reg'])
# In[42]:
assert pd.unique(labeled_df.result_df['comparison_type']) ==['aggregate-subgroup']
# We can also add trends that are structured for pairwise comparisons
# In[43]:
labeled_df.get_pairwise_trends_1lev([rankobj,linreg_obj])
# Again, check that the infrastructure of this by checking that the number of rows is correct
# In[44]:
num_dept_pairs = np.sum(list(range(num_depts)))
num_gender_pairs = np.sum(list(range(num_genders)))
num_dept_genders_pairs = np.sum(list(range(num_dept_genders)))
num_quantile_pairs = np.sum(list(range(num_quantiles)))
gender_indep_pairwise_rows = num_dept_pairs + num_dept_genders_pairs + num_quantile_pairs
dept_indep_pairwise_rows = num_gender_pairs + num_dept_genders_pairs + num_quantile_pairs
lin_reg_pairwise_rows = num_dept_pairs +num_gender_pairs + num_dept_genders_pairs + num_quantile_pairs
rank_pairwise_rows = gender_indep_pairwise_rows + dept_indep_pairwise_rows
total_rows = total_rows_agg_sg + lin_reg_pairwise_rows + rank_pairwise_rows
assert labeled_df.result_df.shape == (total_rows,13)
# In[45]:
assert list( | pd.unique(labeled_df.result_df['comparison_type']) | pandas.unique |
"""Contains methods and classes to collect data from
tushare API
"""
import pandas as pd
import tushare as ts
from tqdm import tqdm
class TushareDownloader :
"""Provides methods for retrieving daily stock data from
tushare API
Attributes
----------
start_date : str
start date of the data (modified from config.py)
end_date : str
end date of the data (modified from config.py)
ticker_list : list
a list of stock tickers (modified from config.py)
Methods
-------
fetch_data()
Fetches data from tushare API
date:date
Open: opening price
High: the highest price
Close: closing price
Low: lowest price
Volume: volume
Price_change: price change
P_change: fluctuation
ma5: 5-day average price
Ma10: 10 average daily price
Ma20:20 average daily price
V_ma5:5 daily average
V_ma10:10 daily average
V_ma20:20 daily average
"""
def __init__(self, start_date: str, end_date: str, ticker_list: list):
self.start_date = start_date
self.end_date = end_date
self.ticker_list = ticker_list
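    # Added illustrative usage sketch (assumption; dates and tickers are placeholders --
    # fetch_data slices each ticker to its first six digits before querying tushare):
    #
    #   downloader = TushareDownloader(start_date='2019-01-01', end_date='2020-01-01',
    #                                  ticker_list=['600000.XSHG', '600016.XSHG'])
    #   df = downloader.fetch_data()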
def fetch_data(self) -> pd.DataFrame:
"""Fetches data from Yahoo API
Parameters
----------
Returns
-------
`pd.DataFrame`
7 columns: A date, open, high, low, close, volume and tick symbol
for the specified stock ticker
"""
# Download and save the data in a pandas DataFrame:
data_df = pd.DataFrame()
for tic in tqdm(self.ticker_list, total=len(self.ticker_list)):
temp_df = ts.get_hist_data(tic[0:6],start=self.start_date,end=self.end_date)
temp_df["tic"] = tic[0:6]
data_df = data_df.append(temp_df)
data_df = data_df.reset_index(level="date")
# create day of the week column (monday = 0)
data_df = data_df.drop(["price_change","p_change","ma5","ma10","ma20","v_ma5","v_ma10","v_ma20"], 1)
data_df["day"] = | pd.to_datetime(data_df["date"]) | pandas.to_datetime |
# -*- coding:utf-8 -*-
# By:<NAME>
# Create:2019-12-23
# Update:2021-10-20
# For: Scraping data from weibo and a simple, not-so-rigorous sentiment analysis based on a sentiment dictionary
import requests
import re
import os
import time
import random
from lxml import etree
from datetime import datetime, timedelta
import pandas as pd
from urllib.request import quote, unquote
from fp.fp import FreeProxy
class ScrapePosts:
def __init__(self,kw=None,cookies=None,headers=None,use_prox=True,st=None,et=None,sort="hot",cr_url=True):
self.cookies = cookies
self.headers = headers
if use_prox:
self.new_proxy()
else:
self.proxies = None
self.keyword = quote(kw, encoding='utf-8') if kw is not None else None
self.starttime = datetime.strptime(st, '%Y/%m/%d') if st is not None else None
self.endtime = datetime.strptime(et, '%Y/%m/%d') if et is not None else None
self.sort = sort
self.url = self.get_url() if cr_url else None
def new_proxy(self, rand = True):
self.proxies = FreeProxy(rand=rand).get()
def change_endtime(self,date):
self.endtime = datetime.strptime(date, '%Y/%m/%d')
self.url = self.get_url()
def change_starttime(self,date):
self.starttime = datetime.strptime(date, '%Y/%m/%d')
self.url = self.get_url()
def change_kw(self,kw):
self.keyword = quote(kw, encoding='utf-8')
self.url = self.get_url()
def change_sort(self,sort):
self.sort = sort
self.url = self.get_url()
def get_filter(self):
self.keyword = input("Please input keyword:")
self.endtime = input("Please input end time(yyyy/mm/dd):")
self.starttime = input("Please input start time(yyyy/mm/dd):")
self.sort = input("Please choose sorting method(time/hot):")
# Sometimes it's ok to just put Chinese words into the url, but it will be better to encode with URL encoding
self.keyword = quote(self.keyword, encoding='utf-8')
self.starttime = datetime.strptime(self.starttime, '%Y/%m/%d')
self.endtime = datetime.strptime(self.endtime, '%Y/%m/%d')
# get the url, note that we need to paste the page= to the url
# and the function returns a list of urls, each of which searches for the posts within one day
def get_url(self):
# default start time is Jan-01, 2010, default sort method is by time(could be by 'hot')
search_url = 'https://weibo.cn/search/mblog?hideSearchFrame='
delta = self.endtime - self.starttime + timedelta(days=1)
url = [None] * delta.days
i = 0
while i < delta.days:
url[i] = search_url + "&keyword=" + self.keyword + "&advancedfilter=1" + "&starttime=" + (
self.starttime + timedelta(days=i)).strftime('%Y%m%d') + "&endtime=" + (
self.starttime + timedelta(days=i)).strftime('%Y%m%d') + "&sort=" + self.sort
i += 1
return url
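# Example of what one generated URL looks like (derived from the string
# concatenation above; the dates and sort value are just an illustration):
#   https://weibo.cn/search/mblog?hideSearchFrame=&keyword=<url-encoded keyword>&advancedfilter=1&starttime=20200201&endtime=20200201&sort=hot
# Each entry covers a single day, and callers still have to append "&page=N"
# before requesting, e.g. url[0] + "&page=1".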
# create a tiny function to create name
def save_html(self, url, html):
ed = re.findall(r'endtime=(.*?)&', url)[0]
pg = re.findall(r'page=(.*)', url)[0]
name = '_'.join([unquote(self.keyword), ed, pg])
save = open('.//html/%s.txt' % name, "w", encoding="utf-8")
save.write('%s' % html)
save.close()
# note that if you generate the url from geturl function, you will need to add the "&page=" to the url
def get_html(self, url, save_html=True, use_prox=True):
# build the request headers; you need a freshly captured cookie, which you can grab with Fiddler
headers = {
'User-Agent': self.headers,
'Cookie': self.cookies
}
if use_prox:
proxies = {
"https": self.proxies.replace("http://",""),
"http": self.proxies.replace("http://", "")
}
response = requests.get(url, headers=headers, proxies=proxies)
else:
response = requests.get(url, headers=headers)
response.encoding = "utf-8"
# to know if we successfully get the response
if response.status_code != 200:
print('\nResponse Error!')
html = response.text
if save_html:
self.save_html(url, html)
html = bytes(html, encoding='utf-8')
html = etree.HTML(html)
return html
def total_page(self, html):
try:
page = html.xpath("//div[@class='pa']//div/text()")
page = str(page)
page = int(re.findall(r'/(.*?)页', str(page))[0])
if page > 100:
page = 100
return page
except Exception as e:
print(f'Error while getting the total page, {e}')
return 0
def parse_html(self, html):
post_list = html.xpath("//div[@class='c'][@id]")
info_list = []
for post in post_list:
poster = post.xpath(".//div/a[@class='nk']/text()")[0]
poster_url = post.xpath(".//div/a[@class='nk']/@href")[0]
post_date = post.xpath(".//div/span[@class='ct']/text()")[0]
post_like = post.xpath(".//div/a[@href]/text()")[-4]
post_repo = post.xpath(".//div/a[@href]/text()")[-3]
post_cmt = post.xpath(".//div/a[@href]/text()")[-2]
div = post.xpath(".//div")
if len(div) == 1:
post_txt = etree.tostring(post.xpath(".//div/span[@class='ctt']")[0], encoding="unicode")
post_txt = post_txt.replace('<span class="ctt">:', '')
post_txt = post_txt.replace(f'<span class="kt">{self.keyword}</span>', self.keyword)
post_txt = post_txt.replace('</span>\xa0', '')
# Here, as above, the data we get may contain nothing or only what the last user who repoed had written
# let's just tackle it later
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
elif len(div) == 2:
try:
temp_post = div[1].xpath(".//text()")
post_txt = " ".join(temp_post[:len(temp_post) - 9])
except Exception as e1:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
print("Error in getting repo information, error type:%s" % e1)
if div[0].xpath(".//span[@class='cmt']/a[@href]/text()"):
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
o_post_txt = etree.tostring(div[0].xpath(".//span[@class='ctt']")[0], encoding="unicode")
o_post_txt = re.sub(r'<[\w+/](.*?)[\"/\w]>', '', o_post_txt)
o_post_txt = re.sub(r'[\s]+', '', o_post_txt)
o_post_like = div[0].xpath(".//span[@class='cmt']/text()")[2]
o_post_repo = div[0].xpath(".//span[@class='cmt']/text()")[3]
o_post_cmt = div[0].xpath(".//a[@class='cc']/text()")[0]
else:
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
# print("Warning: this user can be posting a pic, userID is %s.\r" % poster)
elif len(div) == 3:
try:
temp_post = div[2].xpath(".//text()")
post_txt = " ".join(temp_post[:len(temp_post) - 9])
except Exception as e3:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
print("Error in getting repo information, error type:%s" % e3)
o_poster = div[0].xpath(".//span[@class='cmt']/a[@href]/text()")[0]
o_poster_url = div[0].xpath(".//span[@class='cmt']/a/@href")[0]
# here we can not just choose the text, because people might have @others and posts some hashtags which
# will be eliminated if we only return the text
o_post_txt = etree.tostring(div[0].xpath(".//span[@class='ctt']")[0], encoding="unicode")
o_post_txt = re.sub(r'<[\w+/](.*?)[\"/\w]>', '', o_post_txt)
o_post_txt = re.sub(r'[\s]+', '', o_post_txt)
o_post_like = div[1].xpath(".//span[@class='cmt']/text()")[0]
o_post_repo = div[1].xpath(".//span[@class='cmt']/text()")[1]
o_post_cmt = div[1].xpath(".//a[@class='cc']/text()")[0]
else:
post_txt, post_like, post_repo, post_cmt = None, None, None, None
o_poster, o_poster_url, o_post_txt, o_post_like, o_post_repo, o_post_cmt = None, None, None, None, None, None
print("Error in implement")
info = {
'user_id': poster,
'user_url': poster_url,
'post_date': post_date,
'post_content': post_txt,
'post_like': post_like,
'post_repo': post_repo,
'post_comment': post_cmt,
'original_poster_id': o_poster,
'original_poster_url': o_poster_url,
'original_post_content': o_post_txt,
'original_post_like': o_post_like,
'original_post_repo': o_post_repo,
'original_post_comment': o_post_cmt
}
info_list.append(info)
info_list = pd.DataFrame(info_list)
return (info_list)
def post_list(self, get_ttp = True,use_prox=True):
info_list = pd.DataFrame()
# from the first page, get the total page of each day and also the first html
timer = 0
for url in self.url:
timer = timer + 1
i = 1
child_url = []
child_url.append(url + "&page=1")
try:
html = self.get_html(child_url[0],use_prox=use_prox)
info = self.parse_html(html)
# save the data just in case
if not os.path.isfile("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],unquote(self.keyword))):
info.to_csv("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],unquote(self.keyword)), header=True)
else: # else it exists so append without writing the header
info.to_csv("%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"],unquote(self.keyword)),
mode='a', header=False)
info_list = pd.concat([info_list, info], axis=0, ignore_index=True)
# print("Great! Make it again!")
ttp = self.total_page(html) if get_ttp else 100
# sleep
time.sleep(random.uniform(1, 4))
# the second loop is to get html from each page of the day
print("Try fetch data for day {}".format(re.findall(r'endtime=(.*?)&', url)[0]))
print(' Get a cup of tea :p '.center(100 // 2, '='))
start = time.perf_counter()
while i < ttp:
i = i + 1
child_url.append(url + "&page=%s" % i)
try:
html = self.get_html(child_url[i - 1],use_prox=use_prox)
info = self.parse_html(html)
# save the data just in case
if not os.path.isfile(
"%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"], unquote(self.keyword))):
info.to_csv(
"%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"], unquote(self.keyword)),
header=True)
else: # else it exists so append without writing the header
info.to_csv(
"%s\Desktop\data\%s_append.csv" % (os.environ["HOMEPATH"], unquote(self.keyword)),
mode='a', header=False)
info_list = pd.concat([info_list, info], axis=0, ignore_index=True)
time.sleep(random.uniform(1,2))
except Exception as e:
print("Error in getting info list, cheack the PostList. Error type: %s" % e)
if use_prox:
self.new_proxy()
time.sleep(5)
a = "*" * int(50 * i / ttp)
b = '.' * int(50 * (1 - (i / ttp)))
c = i / ttp * 100
dur = time.perf_counter() - start
left = dur / i * (ttp - i) / 60
print('\r{:^3.0f}%[{}->{}] Dur: {:.2f}min; Approx {:.2f}min left'.format(c, a, b, dur / 60, left),
end='')
print('\n' + ' Grattis! Everything Works! '.center(100 // 2, '=') + '\n' + '\n')
except Exception as e:
print("Error in getting info list, cheack the PostList. Error type: %s" % e)
if use_prox:
self.new_proxy()
time.sleep(5)
return info_list
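# --- Usage sketch (illustrative only) ---
# A hedged, end-to-end example of driving the scraper above. The cookie and
# user-agent values are placeholders; real ones must come from a logged-in
# weibo.cn session (e.g. captured with Fiddler), otherwise the requests fail.
#
# sp = ScrapePosts(
#     kw="疫苗",
#     cookies="<fresh weibo.cn cookie string>",
#     headers="Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
#     use_prox=False,
#     st="2020/02/01",
#     et="2020/02/03",
#     sort="hot",
# )
# posts = sp.post_list(get_ttp=True, use_prox=False)
# print(posts.shape)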
'''
This function is written for the data scraped and stored by 'execute' with the extract object.
Since the scraped data are not perfect for text-analysis tasks, we also do a little cleaning;
after the process, the new data frames are stored in //analyse data/data
Note:
the function also returns the data frames, in the order: norm_user (who post/repo the
post), norm_user2 (who are repoed, but whose url, likes, etc. we don't have), V_user (who are
repoed, and who are mostly popular weiboers)
'''
def divide_post(self,post_list,id_prefix):
post_list = post_list.drop_duplicates().reset_index(drop=True)
main_post = pd.DataFrame()
other_post = pd.DataFrame()
vip_post = pd.DataFrame()
print(' Get a cup of tea :p '.center(100 // 2, '='))
j = 0
start = time.perf_counter()
for i in post_list.index:
test_str = post_list['post_content'][i]
# pa is for the post we have scraped
pa_mpid = "%s%s%06d"%(id_prefix,0,i+1)
pa_uid = post_list['user_id'][i]
pa_url = post_list['user_url'][i].replace("https://weibo.cn/","")
try:
pa_time = re.findall(r"\d{2}月\d{2}日\s\d{2}:\d{2}", post_list['post_date'][i])[0]
pa_time = datetime.strptime(pa_time, '%m月%d日 %H:%M')
pa_time = pa_time.replace(year=2020)
except:
pa_time = None
try:
pa_dev = re.findall(r"来自(.*)", post_list['post_date'][i])[0]
except:
pa_dev = None
pa_like = int(re.sub(r"\D", "", post_list['post_like'][i]))
pa_repo = int(re.sub(r"\D", "", post_list['post_repo'][i]))
pa_cmt = int(re.sub(r"\D", "", post_list['post_comment'][i]))
# v is for the post that has been reposted; these are mostly posts by popular weiboers
v_post = post_list['original_post_content'][i]
try:
v_uid = post_list['original_poster_id'][i]
v_url = post_list['original_poster_url'][i].replace("https://weibo.cn/","")
v_like = int(re.sub(r"\D", "", post_list['original_post_like'][i]))
v_repo = int(re.sub(r"\D", "", post_list['original_post_repo'][i]))
v_cmt = int(re.sub(r"\D", "", post_list['original_post_comment'][i]))
temp_v = {
'MP_id': pa_mpid,
'OP_id': "%s%s%06d"%(id_prefix,1,i+1),
'OP_user_id': v_uid,
'OP_user_url': v_url,
'OP_content': v_post,
'OP_like': v_like,
'OP_repo': v_repo,
'OP_cmt': v_cmt
}
temp_v = pd.DataFrame(temp_v, index=[0])
vip_post = pd.concat([vip_post, temp_v], ignore_index=True, axis=0)
except:
v_url = None
# print('\rThere is no original post!')
try:
pa_post = re.findall(r'转发理由: (.*?)//', test_str)[0]
pa_post = re.sub(r'[\s]+', '', pa_post)
except:
pa_post = None
temp_main = {
'MP_id': pa_mpid,
'MP_user_id': pa_uid,
'MP_user_url': pa_url,
'MP_date': pa_time,
'MP_dev': pa_dev,
'MP_content': pa_post,
'MP_like': pa_like,
'MP_repo': pa_repo,
'MP_cmt': pa_cmt,
'OP_uer_url': v_url
}
temp_main = pd.DataFrame(temp_main, index=[0])
main_post = pd.concat([main_post, temp_main], ignore_index=True, axis=0)
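# Illustration of the ID scheme built above (derived from the "%s%s%06d" format
# strings): with id_prefix="A", row i=0 yields MP_id "A0000001" for the scraped
# (main) post and OP_id "A1000001" for the original post it reposted, i.e.
# <prefix><0 or 1><six-digit row number>, where 0 marks the main post and 1 the
# original (reposted) post.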
#################################################################
# #
# Useful python scripts for interfacing #
# with datasets and programs #
# #
#################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from pypdb import describe_pdb
import os, sys
import tqdm
import Bio
def ProTherm_data():
data_path = "data/ProTherm+HotMusic.csv"
dataset = pd.read_csv(data_path)
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sys
import pandas as pd
import requests
from datetime import datetime
def prod40(fte, prod):
df = pd.read_csv(fte, encoding='latin-1')
#drop Region = Nan: includes all invalid dates
df = df[df['Region_origen'].notna()]
df['Cod_region_origen'] = df['Cod_region_origen'].astype(int)
df['Cod_region_destino'] = df['Cod_region_destino'].astype(int)
# standardize the date columns
df['Inicio_semana'] = pd.to_datetime(df['Inicio_semana'], format='%d-%m-%Y')
df['Fin_semana'] = pd.to_datetime(df['Fin_semana'], format='%d-%m-%Y')
df['Inicio_semana'] = df['Inicio_semana'].astype(str)
df['Fin_semana'] = df['Fin_semana'].astype(str)
# drop the Año and Mes columns
df.drop(columns=['Año', 'Mes'], inplace=True)
print(df.to_string())
df.to_csv(prod + 'TransporteAereo_std.csv', index=False)
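# Usage sketch (illustrative; the file and directory names are assumptions):
#   prod40('../input/TransporteAereo/TransporteAereo.csv', '../output/')
# reads the weekly air-transport file, standardizes the dates and region codes,
# and writes '<prod>TransporteAereo_std.csv' into the given output directory.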
def prod40_from_API(url, api_key, prod):
print('Generating prod40 from API')
response = requests.get(url + api_key)
my_list = response.json()['aéreo nacional - movimientos y pasajeros']
#print(my_list)
df = pd.DataFrame(my_list, dtype=str)
#print(list(df))
# compare 'mes' with the leading field of inicioSemana and finsemana:
# if they are equal, the leading field is the month
# if not, it is the day
for i in range(len(df)):
mes = df.loc[i, 'mes']
iniSemana = df.loc[i, 'inicioSemana']
finDe = df.loc[i, 'finsemana']
anio = df.loc[i,'anio']
print('mes: ' + mes)
print('iniSemana: ' + iniSemana[:2])
print('finDe: ' + finDe[:2])
if int(mes) == int(iniSemana[:2]):
# print('mes primero en inisemana')
df.loc[i, 'inicioSemana'] = pd.to_datetime(df.loc[i, 'inicioSemana'], dayfirst=False)
else:
# print('dia primero en inisemana')
df.loc[i, 'inicioSemana'] = pd.to_datetime(df.loc[i, 'inicioSemana'], dayfirst=True)
if int(mes) == int(finDe[:2]):
# print('mes primero en finde')
df.loc[i, 'finsemana'] = pd.to_datetime(df.loc[i, 'finsemana'], dayfirst=False)
else:
# print('dia primero en finde')
df.loc[i, 'finsemana'] = pd.to_datetime(df.loc[i, 'finsemana'], dayfirst=True)
df['inicioSemana'] = pd.to_datetime(df['inicioSemana'], dayfirst=True)
df['finsemana'] = pd.to_datetime(df['finsemana'], dayfirst=True)
# drop unused columns
df.drop(columns=['anio', 'mes'], inplace=True)
df_localidades = pd.read_csv('../input/JAC/JAC_localidades.csv')
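# Worked example of the month/day disambiguation above: a raw value such as
# '03-07-2020' is ambiguous on its own. If the row's 'mes' field is 3, the
# leading '03' must be the month, so the value is parsed month-first
# (2020-03-07); if 'mes' is 7, the leading '03' must be the day, so it is
# parsed day-first (2020-07-03). The same rule as a standalone sketch:
#   raw, mes = '03-07-2020', 7
#   parsed = pd.to_datetime(raw, dayfirst=(int(mes) != int(raw[:2])))
#   # parsed -> Timestamp('2020-07-03 00:00:00')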
import logging
import re
import time
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pandas as pd
import requests
from bs4 import BeautifulSoup
from covidata import config
from covidata.persistencia.dao import persistir_dados_hierarquicos
def pt_PortoAlegre():
url = config.url_pt_PortoAlegre
parsed_uri = urlparse(url)
url_base = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
page = requests.get(url)
parser = BeautifulSoup(page.content, 'lxml')
proximas_paginas = __get_paginacao(parser, url_base)
nomes_colunas_documentos = []
linhas_df = []
linhas_df_documentos = []
linhas_df_itens = []
linhas_df_documentos, linhas_df_itens, nomes_colunas_documentos = __processar_pagina(linhas_df,
linhas_df_documentos,
linhas_df_itens,
nomes_colunas_documentos,
parser, url_base)
for pagina in proximas_paginas:
parser = __parse(pagina)
linhas_df_documentos, linhas_df_itens, nomes_colunas_documentos = __processar_pagina(linhas_df,
linhas_df_documentos,
linhas_df_itens,
nomes_colunas_documentos,
parser, url_base)
nomes_colunas = ['Número da licitação', 'Objeto', 'Tipo', 'Status', 'Aplicar o Decreto 10.024/2019',
'Início de propostas', 'Final de propostas', 'Limite para impugnações', 'Data de Abertura',
'Pregoeiro', 'Autoridade Competente', 'Apoio', 'Origem dos Recursos', 'Operação']
df = pd.DataFrame(linhas_df, columns=nomes_colunas)
import pandas as pd
import yfinance as yf
import altair as alt
from pandas_datareader import data
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
plt.style.use("fivethirtyeight")
# For reading stock data from yahoo
from datetime import datetime
st.header("Part A: Exploring the volatility")
st.title("Did the crypto volatility affect stock prices?")
st.caption("1. Here, we can select Bitcoin and any other stock for comparison. Select two, or multiple stocks for comparison")
stock_list = ('BTC-USD','ETH-USD','TSLA','AAPL', 'GOOG', 'MSFT', 'AMZN')
dropdown = st.multiselect('Select stocks for comparison', stock_list)
start = st.date_input('Start',value=pd.to_datetime('2021-01-01'))
end = st.date_input('End',value=pd.to_datetime('today'))
def stock_returns(df):
rel=df.pct_change()
cumret = (1+rel).cumprod() - 1
cumret = cumret.fillna(0)
return cumret
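# Worked example (illustrative) of the cumulative-return formula above: for
# adjusted closes [100, 110, 99], pct_change gives [NaN, 0.10, -0.10] and
# (1 + rel).cumprod() - 1 gives [NaN, 0.10, -0.01]; fillna(0) turns the leading
# NaN into a 0% starting return.
_returns_demo = stock_returns(pd.Series([100.0, 110.0, 99.0]))
assert abs(_returns_demo.iloc[-1] - (-0.01)) < 1e-6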
if len(dropdown) > 0:
df = stock_returns(yf.download(dropdown,start,end)['Adj Close'])
st.line_chart(df)
df = stock_returns(yf.download(stock_list,start,end)['Adj Close'])
#Interaction = stock_returns(yf.download(stock_list,datetime(end.year - 1, end.month, end.day),end)['Adj Close'])
df.reset_index(inplace=True)
df = df.rename(columns = {'index':'Date'})
df= df.melt(id_vars="Date")
df=df.rename(columns={"Date": "date", "variable": "company", "value": "return"})
portfolio=df
interval = alt.selection_interval(encodings=['x'])
scatter = alt.Chart(portfolio).mark_line().encode(
x='date',
y='return',
color='company',
).properties(
width=550,
selection=interval
)
bar = alt.Chart(portfolio).mark_bar().encode(
x='average(return)',
y='company',
color='company',
).properties(
width=550,
).transform_filter(
interval
)
return_comparison_viz = scatter & bar
st.title("Did the crypto volatility affect stock returns?")
st.caption("2. Both the visualization represent stock returns over the last year. Click on the graph and select a time-period you want to explore. Slide your selection across the graph, to view the change in returns for each stock at the bottom ")
st.write(return_comparison_viz)
st.header("Part B: Dwelling into the Percentages")
st.title("Does History repeat itself!")
st.subheader("Enter the Stock Code:")
st.caption("Please enter the stock code (ticker code) which you want to analyse and see visualizations for. We are using Yahoo Finance API to fetch the real-time data, and here we can render visulizations for all stock.")
stock_ticker = st.text_input("Please enter the ticker code for stock whose analysis you would like to Visualize", 'AAPL')
#stock_ticker = 'SNAP'
# start = pd.to_datetime(['2007-01-01']).astype(int)[0]//10**9 # convert to unix timestamp.
start = pd.to_datetime(['today']) - pd.DateOffset(years=5)
start = start.astype(int)[0]//10**9
# start = pd.to_datetime(['today']-5).astype(int)[0]//10**9 # convert to unix timestamp.
end = pd.to_datetime(['today']).astype(int)[0]//10**9
# end = pd.to_datetime(['2020-12-31']).astype(int)[0]//10**9 # convert to unix timestamp.
url = 'https://query1.finance.yahoo.com/v7/finance/download/' + stock_ticker + '?period1=' + str(start) + '&period2=' + str(end) + '&interval=1d&events=history'
df = pd.read_csv(url)
def daily_percent_change(df):
df.loc[0, '% Change'] = 0
for i in range(1, len(df)):
df.loc[i, '% Change'] = ((df.loc[i,'Adj Close'] - df.loc[i-1,'Adj Close']) / df.loc[i-1,'Adj Close']) * 100
return df
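# Note: the loop above is equivalent to the vectorized pandas one-liner below,
# shown only as an illustration (the app keeps using the loop version):
#   df['% Change'] = df['Adj Close'].pct_change().fillna(0) * 100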
df = daily_percent_change(df)
df_top_10_positive = df.sort_values('% Change').tail(10)
df_top_10_negative = df.sort_values('% Change').head(10)
top_positive = alt.Chart(df_top_10_positive).mark_bar(color='green',tooltip=True).encode(
x = 'Date',
y = '% Change'
).properties(
title='Top 10 Positive percentage changes for the Stock'
)
top_negative = alt.Chart(df_top_10_negative).mark_bar(color='red', tooltip=True).encode(
x = 'Date',
y = '% Change'
).properties(
title='Top 10 Negative percentage changes for the Stock'
)
st.header("Does Percentage Changes history mean something?")
st.caption("The first two juxtaposed visualizations are for top 10 positive and top 10 negative percentage changes in last 5 years of the stock trading history. It gives the date and amount of percentage change for a that particular date")
st.altair_chart(top_positive | top_negative)
st.caption("Here, we plot percentage changes historical trend and compare with its frequency of occurence. Also, you can adjust the slider to see info about specific range of percentage fluctationn. This helps to understand about volatility of stocks and unravel hidden patterns about stock price fluctation pattern.")
#df
#pip install streamlit
pct_range = st.slider('% Change',
min_value=int(df['% Change'].min()),
max_value=int(df['% Change'].max()),
value=(int(df['% Change'].min()), int(df['% Change'].max())))
def get_slice_membership(df, pct_range):
labels = pd.Series([1] * len(df), index=df.index)
if pct_range is not None:
labels &= df['% Change'] >= pct_range[0]
labels &= df['% Change'] <= pct_range[1]
return labels
slice_labels = get_slice_membership(df, pct_range)
pct_change_slice = df[slice_labels]
chart = alt.Chart(pct_change_slice, title='In Slice').mark_bar(tooltip=True).encode(
alt.X('% Change', bin=alt.Bin(step=1)),
alt.Y('count()')
).interactive()
st.altair_chart(chart, use_container_width=True)
st.title("Does History repeat itself?")
st.caption("This is the life time interactive price curve for the stock. Please feel free to zoom and interact with the graph to see if there is any repeating pattern in here. Do we think historical data of the price repeats after a certain period of time. Also, we render critical metrics to support this graph such as real-time price of the stock, percentage change in price and 52 week price trend of the stock. The output of price is in the currency the stock is traded in such as USD, EUR etc. ")
start = pd.to_datetime(['1970-01-01']).astype(int)[0]//10**9 # convert to unix timestamp.