import pytest
import pandas as pd
from pandas import compat
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import assert_frame_equal, assert_raises_regex
COMPRESSION_TYPES = [None, 'bz2', 'gzip',
pytest.param('xz', marks=td.skip_if_no_lzma)]
def decompress_file(path, compression):
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.open(path, 'rb')
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
result = f.read().decode('utf8')
f.close()
return result
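# Illustrative usage sketch (not part of the original test module; the frame and
# compression choice here are invented for the example):
#
#     example_df = pd.DataFrame({"a": [1, 2]})
#     with tm.ensure_clean() as example_path:
#         example_df.to_json(example_path, compression="gzip")
#         raw_json = decompress_file(example_path, "gzip")  # plain JSON text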
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_compression_roundtrip(compression):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
uncompressed_content = decompress_file(path, compression)
assert_frame_equal(df, pd.read_json(uncompressed_content))
def test_compress_zip_value_error():
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_json, path, compression="zip")
def test_read_zipped_json():
uncompressed_path = tm.get_data_path("tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = tm.get_data_path("tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression='zip')
assert_frame_equal(uncompressed_df, compressed_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
moto = pytest.importorskip('moto')
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
bucket = conn.create_bucket(Bucket="pandas-test")
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, 'rb') as f:
bucket.put_object(Key='test-1', Body=f)
roundtripped_df = pd.read_json('s3://pandas-test/test-1',
compression=compression)
assert_frame_equal(df, roundtripped_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_lines_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True, compression=compression)
roundtripped_df = pd.read_json(path, lines=True,
compression=compression)
assert_frame_equal(df, roundtripped_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_chunksize_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True, compression=compression)
roundtripped_df = pd.concat(pd.read_json(path, lines=True, chunksize=1,
compression=compression))
assert_frame_equal(df, roundtripped_df)
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because it's not
# defined; we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name so it does not affect the later checks
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
'''
Script to convert txt results to csv
'''
import csv
import glob
import os
import sys
import pandas as pd
def parse_gpt2_file(filename: str):
'''
Parses a GPT2 file to return all the results
'''
results = []
curr_output = ''
with open(filename, 'r') as f:
for line in f.readlines():
if len(line) == 0:
continue
if line.startswith('-------------'):
if len(curr_output) > 0:
curr_output = curr_output.strip()
curr_output = curr_output.replace(
'\n', ' ').replace('\r', '')
results.append(curr_output)
curr_output = ''
else:
curr_output += line
if len(curr_output) > 0:
curr_output = curr_output.strip()
curr_output = curr_output.replace(
'\n', ' ').replace('\r', '')
results.append(curr_output)
curr_output = ''
return results
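# Assumed input layout (inferred from the parsing logic above, not documented in
# the original): generations separated by dashed separator lines, e.g.
#
#     first generated sample, possibly
#     spanning several lines
#     -------------
#     second generated sample
#     -------------
#
# parse_gpt2_file("results.txt") would then return one flattened string per sample.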
def parse_ulmfit_file(filename: str):
'''
Parses a ULMFiT results file to return all the results
'''
results = []
curr_output = ''
with open(filename, 'r') as f:
for line in f.readlines():
if len(line) == 0:
continue
if line.startswith('-----'):
if len(curr_output) > 0:
curr_output = curr_output.strip()
curr_output = curr_output.replace(
'\n', ' ').replace('\r', '')
results.append(curr_output)
curr_output = ''
else:
curr_output += line
if len(curr_output) > 0:
curr_output = curr_output.strip()
curr_output = curr_output.replace(
'\n', ' ').replace('\r', '')
results.append(curr_output)
curr_output = ''
return results
if __name__ == '__main__':
args = sys.argv[1:]
mode = args[0]
if not (mode == 'gpt2' or mode == 'ulmfit'):
raise NotImplementedError('This mode is not implemented')
dirname = args[1]
filenames = glob.glob(os.path.join(dirname, '*.txt'))
for filename in filenames:
if mode == 'gpt2':
results = parse_gpt2_file(filename)
elif mode == 'ulmfit':
results = parse_ulmfit_file(filename)
# figure out the output name
result_name = os.path.splitext(filename)[0] + '.csv'
resultsmall_name = os.path.splitext(filename)[0] + '_small.csv'
df = pd.DataFrame(results, columns=['output'])
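# A plausible continuation (an assumption, not taken from the original script)
# would simply persist the parsed outputs:
#
#     df.to_csv(result_name, encoding='utf-8-sig', index=False)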
"""
Training script. Should be pretty adaptable to whatever.
"""
import argparse
import os
import shutil
import json
from copy import deepcopy
import multiprocessing
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
from allennlp.nn.util import device_mapping
from vis import grounding_vis
from visualbert.utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint, restore_checkpoint_flexible, load_state_dict_flexible, compute_score_with_logits
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
from visualbert.dataloaders.vcr import VCR, VCRLoader
try:
from visualbert.dataloaders.coco_dataset import COCODataset
except:
print("Import COCO dataset failed.")
try:
from visualbert.dataloaders.nlvr_dataset import NLVRDataset
except:
print("Import NLVR2 dataset failed.")
try:
from visualbert.dataloaders.vqa_dataset import VQADataset
except:
print("Import VQA dataset failed.")
try:
from visualbert.dataloaders.flickr_dataset import Flickr30kFeatureDataset
except:
print("Import Flickr30K dataset failed.")
from pytorch_pretrained_bert.optimization import BertAdam
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
from allennlp.models import Model
from visualbert.models.model_wrapper import ModelWrapper
from visualbert.models import model
from attrdict import AttrDict
def check_prob(val_probs, INDEX_OF_CHECKED_SAMPLE):
ps = np.exp(val_probs[INDEX_OF_CHECKED_SAMPLE])
ps /= np.sum(ps)
return ps
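# check_prob is effectively a softmax over stored log-probabilities: it
# exponentiates one row of val_probs and renormalises it to sum to 1.
# Tiny illustrative call (values invented for the example):
#
#     val_probs = np.log([[0.1, 0.2, 0.3, 0.4]])
#     check_prob(val_probs, 0)  # -> array([0.1, 0.2, 0.3, 0.4])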
# If you want to play grounding analysis, feel free to use this function!
def grounding_analysis(args, input_batch, output_dict, question_orig, answer_orig, obj_added_index, \
file_name_list, annot_id_list, b):
if args.orig_or_new == "new":
bert_input_ids = input_batch["bert_input_ids"].detach().cpu().numpy()
labels = input_batch["label"].detach().cpu().numpy()
objects = input_batch["objects"].detach().cpu().numpy()
attention_weights = output_dict["attention_weights"][-1].detach().cpu().numpy()
question_orig_cur = question_orig[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
answer_orig_cur = answer_orig[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
obj_added_index_cur = obj_added_index[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
file_name_list_cur = file_name_list[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
annot_id_list_cur = annot_id_list[b*args.eval_batch_size: b*args.eval_batch_size+len(bert_input_ids)]
if args.addition_annotation_analysis:
for i in range(len(bert_input_ids)):
label = labels[i]
dets2use = dets2uses[i]
file_name = file_name_list_cur[i]
annot_id = annot_id_list_cur[i]
right_ans_input_ids = bert_input_ids[i][label]
attention_weights_i = attention_weights[i*4+label]
texts = tokenizer.convert_ids_to_tokens(right_ans_input_ids)
texts, people_names = recover(texts, question_orig_cur[i], answer_orig_cur[i])
j = 0
obj_list = []
for obj in objects[i]:
if obj == 0:
obj_list.append("[BG]")
elif obj == -1:
obj_list.append("[I_PAD]")
else:
obj_list.append("["+obj_added_index_cur[i][int(dets2use[j])]+"]\n(image)")
j += 1
texts += obj_list
indices = []
for j, token in enumerate(texts):
if token == "[CLS]" or token == "[SEP]" or token == "[PAD]" or token == "[I_PAD]" or token == ".":
indices.append(j)
texts = np.delete(texts, indices, axis=0)
for j in range(len(attention_weights_i)):
attention_temp = np.delete(attention_weights_i[j], indices, axis=0)
final_attention = np.delete(attention_temp, indices, axis=1)
assert len(texts) == len(final_attention)
pos_seg = file_name.find('/')
file_name = file_name[pos_seg+1:]
grounding_vis(final_attention, texts, file_name.replace(".", "_"+annot_id+"_head_"+str(j)+"_result."), args.region, args.single_or_multiple)
def recover(texts, question_orig, answer_orig):
all_orig = question_orig + answer_orig
classes_orig = []
people_names = []
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn']
GENDER_NEUTRAL_NAMES_new = [name.lower() for name in GENDER_NEUTRAL_NAMES]
for token in all_orig:
if '[' in token and ']' in token:
classes_orig.append(token)
for i, token in enumerate(texts):
if token in GENDER_NEUTRAL_NAMES_new:
while "person" not in classes_orig[0]:
if len(classes_orig) == 1:
classes_orig = []
break
classes_orig = classes_orig[1:]
if classes_orig:
texts[i] = classes_orig[0]
people_names.append(classes_orig[0])
if len(classes_orig) >= 2:
classes_orig = classes_orig[1:]
return texts, people_names
def add_index(obj_orig_list):
added_index_all = []
for obj_orig in obj_orig_list:
added_index = []
freq = dict()
for obj in obj_orig:
if obj not in freq.keys():
freq[obj] = 1
else:
freq[obj] += 1
added_index.append(obj+str(freq[obj]))
added_index_all.append(added_index)
return added_index_all
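# Worked example of add_index (illustrative only): repeated detection classes
# get a running per-class counter so individual boxes can be told apart, e.g.
#
#     add_index([["person", "person", "car"]])
#     # -> [["person1", "person2", "car1"]]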
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-folder',
dest='folder',
help='folder location',
type=str,
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-config',
dest='config',
help='config location',
type=str,
)
parser.add_argument(
'-region',
dest='region',
default='any',
help='region',
type=str,
)
parser.add_argument(
'-single_or_multiple',
dest='single_or_multiple',
default='single',
help='single_or_multiple',
type=str,
)
parser.add_argument(
'-orig_or_new',
dest='orig_or_new',
default='new',
help='orig_or_new',
type=str,
)
parser.add_argument(
'-addition_annotation_analysis',
dest='addition_annotation_analysis',
action='store_true',
)
parser.add_argument(
'-grounding',
dest='grounding',
action='store_true',
)
parser.add_argument(
'-scene',
dest='scene',
default='none',
help='scene',
type=str,
)
parser.add_argument(
'-not_use_all_dets',
dest='not_use_all_dets',
action='store_false'
)
args = parser.parse_args()
args = ModelWrapper.read_and_insert_args(args, args.config)
#####################################################
if os.path.exists(args.folder):
create_flag = 0
else:
create_flag = 1
print("Making directories")
os.makedirs(args.folder, exist_ok=True)
import sys
run_log_counter = 0
while(os.path.exists(args.folder + '/run_{}.log'.format(run_log_counter))):
run_log_counter += 1
file_log = open(args.folder + '/run_{}.log'.format(run_log_counter),'w') # File where you need to keep the logs
file_log.write("")
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
file_log.write(data) # Write the data of stdout here to a text file as well
def flush(self):
pass
sys.stdout = Unbuffered(sys.stdout)
NUM_GPUS = torch.cuda.device_count()
NUM_CPUS = multiprocessing.cpu_count()
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
if args.get("fp16", False):
_to_fp16(td)
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
if td[k] is not None:
td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[k].cuda(non_blocking=True)
return td
def _to_fp16(td):
for k in td:
if isinstance(td[k], torch.FloatTensor):
td[k] = td[k].to(dtype=torch.float16)
num_workers = args.get("num_workers", 2)
val_workers = args.get("val_workers", 0)
TEST_DATA_READING = False
if TEST_DATA_READING:
num_workers = 0
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
def get_dataset_loader(args, dataset_name):
if dataset_name == "vcr":
train, orig_val, val, val_addition, test = VCR.splits(
mode='rationale' if args.rationale else 'answer',
region_keywords = args.region,
scene = args.scene,
single_or_multiple = args.single_or_multiple,
only_use_relevant_dets = args.not_use_all_dets,
do_lower_case = args.do_lower_case,
bert_model_name = args.bert_model_name,
max_seq_length = args.max_seq_length,
pretraining = args.pretraining,
pretraining_include_qa_and_qar = args.pretraining_include_qa_and_qar,
complete_shuffle = args.get("complete_shuffle", False),
use_alignment = args.get('use_alignment', False),
add_all_features = args.add_all_features,
answer_labels_path = args.get("answer_labels_path", None),
vcr_annots_dir = args.vcr_annots_dir,
vcr_image_dir = args.vcr_image_dir
)
elif dataset_name == "coco":
train, val, test = COCODataset.splits(args)
elif dataset_name == "nlvr":
train, val, test = NLVRDataset.splits(args)
elif dataset_name == "vqa":
train, val, test = VQADataset.splits(args)
elif dataset_name == "flickr":
train, val, test = Flickr30kFeatureDataset.splits(args)
else:
assert(0)
loader_params = {'batch_size': args.train_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
train_loader_params = deepcopy(loader_params)
loader_params_val = {'batch_size': args.eval_batch_size // NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
val_loader_params = deepcopy(loader_params_val)
val_loader_params["num_workers"] = val_workers
test_loader_params = deepcopy(loader_params_val)
test_loader_params["num_workers"] = val_workers
train_loader = VCRLoader.from_dataset(train, **train_loader_params)
val_loader = VCRLoader.from_dataset(val, **val_loader_params)
test_loader = VCRLoader.from_dataset(test, **test_loader_params)
if dataset_name == "vcr":
orig_val_loader_params = deepcopy(loader_params_val)
orig_val_loader_params["num_workers"] = val_workers
val_addition_loader_params = deepcopy(loader_params_val)
val_addition_loader_params["num_workers"] = val_workers
orig_val_loader = VCRLoader.from_dataset(orig_val, **orig_val_loader_params)
val_addition_loader = VCRLoader.from_dataset(val_addition, **val_addition_loader_params)
train_set_size = len(train)
print("orig_val size", len(orig_val))
print("val size", len(val))
print("val-addition size", len(val_addition))
if dataset_name == "vcr":
return train_loader, orig_val_loader, val_loader, val_addition_loader, test_loader, train_set_size
else:
return train_loader, val_loader, test_loader, train_set_size
print(args)
if args.dataset == "vcr":
train_loader, orig_val_loader, val_loader, val_addition_loader, test_loader, train_set_size = get_dataset_loader(args, args.dataset)
else:
train_loader, val_loader, test_loader, train_set_size = get_dataset_loader(args, args.dataset)
ARGS_RESET_EVERY = args.get("print_every", 100)
train_model = ModelWrapper(args, train_set_size)
# Loading from pre-trained model
if args.restore_bin:
train_model.restore_checkpoint_pretrained(args.restore_bin)
# Loading from previous checkpoint
'''if create_flag == 0:
start_epoch, val_metric_per_epoch = train_model.restore_checkpoint(serialization_dir=args.folder, epoch_to_load = args.get("epoch_to_load", None))
if val_metric_per_epoch is None:
val_metric_per_epoch = []
else:
create_flag = 1
start_epoch, val_metric_per_epoch = 0, []'''
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.config, args.folder) # Always copy the config
if args.get("freeze_detector", True):
train_model.freeze_detector()
param_shapes = print_para(train_model.model)
print(args)
print("########### Starting from {}".format(start_epoch))
num_batches = 0
stop_epoch = args.num_train_epochs
save_every = args.get("save_every", None)
with open('../dataloaders/cocoontology.json', 'r') as f1:
coco = json.load(f1)
coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
tokenizer = BertTokenizer.from_pretrained(args.bert_model_name, do_lower_case=args.do_lower_case)
for epoch_num in range(start_epoch, stop_epoch):
train_results = []
norms = []
train_model.model.train()
if not args.get("skip_training", False):
for b, (time_per_batch, batch) in enumerate(time_batch(tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):
del batch["dets2use"]
batch = _to_gpu(batch)
output_dict = train_model.step(batch)
num_batches += 1
train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(),
'crl': output_dict.get("cnn_regularization_loss", 0.0),
'next_sentence_loss': output_dict["next_sentence_loss"].mean().item() if "next_sentence_loss" in output_dict else 0.0,
'masked_lm_loss': output_dict["masked_lm_loss"].mean().item() if "masked_lm_loss" in output_dict else 0.0,
'accuracy': (train_model.model.module).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0)[
'accuracy'],
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}))
if b % ARGS_RESET_EVERY == 0 and b > 0:
print("e{:2d}b{:5d}/{:5d}. \nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format(
epoch_num, b, len(train_loader),
pd.DataFrame(train_results[-ARGS_RESET_EVERY:])))
from selenium import webdriver as wd
from selenium.webdriver.chrome.options import Options
import time
import csv
import os
import random
import json
import shutil
import pandas as pd
from modules.checker import Checker
from modules.basic_scraping_module import get_response #, get_soup
from modules.supplier_utils.uniform_category_transformer import query_uniform_category
def read_scrapy_setting():
img_hist = "./res3/img_html_source/img_hist.txt"
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
break_point = int(data[1].split(":")[-1].strip())
avg_wait_time = int(data[2].split(":")[-1].strip())
return break_point, avg_wait_time
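# read_scrapy_setting assumes img_hist.txt stores one "key: value" pair per
# line, with the break point on the second line and the average wait time on
# the third. Illustrative file contents (keys and values are made up):
#
#     last_img_id: 5000
#     break_point: 200
#     avg_wait_time: 10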
class Webdriver():
def get_webdriver(self):
chrome_options = Options()
chrome_options.headless = True
wd_path = "D:/geckodriver/chromedriver.exe"
driver = wd.Chrome(wd_path, options=chrome_options)
driver.implicitly_wait(10)
return driver
class Clothes_crawler():
def imgID_padding(self):
csv_path = "./res3/tier_2.csv"
df = pd.read_csv(csv_path)
#print(data.head())
new_col_data = [i for i in range(1, len(df)+1)]
new_col_name = "img_id"
df[new_col_name] = new_col_data
#print(data.tail())
out_csv_path = "./res3/tier_2_modified.csv"
df.to_csv(out_csv_path, encoding="utf-8-sig", index=False)
###########################################################
def copy_single_prod_img(self, img_id, existing_img_id):
img_dir = "./res3/img_html_source/"
shutil.copy(f"{img_dir}{existing_img_id}.jpg", f"{img_dir}{img_id}.jpg")
def download_single_prod_img(self, prod_img_link, img_id, wait_time):
img_path = f"./res3/img_html_source/{img_id}.jpg"
if os.path.exists(img_path):
print(f"[img {img_id}] Image is already exists.")
return 0
# [***] send requests to image link
# put all correct image links to the new csv file
# path: ./res3/img_html_source
if "grey.gif" not in prod_img_link:
try:
r = get_response(prod_img_link)
with open(img_path, "wb") as fp:
fp.write(r.content)
print(f"[img {img_id}] Successfully downloaded.")
# wait a random amount of time (centred on the wait_time argument)
self.wait_some_seconds(wait_time + random.randint(-53,41)/10)
return 1
except:
print(f"[img {img_id}] ERR-2: Fail to access image link when scrapying image")
return -1
else:
print("跳過")
def wait_some_seconds(self, wait_time):
#print(f"(隨機)等待 {wait_time} 秒")
print(f"等待 {wait_time} 秒")
time.sleep(wait_time)
def download_multiple_prod_imgs(self, break_point=-1, wait_time=10):
# reset crawler
self.set_driver()
# read image history if exists
img_hist = "./res3/img_html_source/img_hist.txt"
if os.path.exists(img_hist):
with open(img_hist, "r", encoding="utf-8-sig") as fp:
data = fp.readlines()
img_id_start = int(data[0].split(":")[-1].strip())  # resume from the image after the last one already downloaded
else:
img_id_start = 5001 # 1
# read image mapping if exists
img_mapping_json = "./res3/img_html_source/img_record.json"
if os.path.exists(img_mapping_json):
with open(img_mapping_json, "r", encoding="utf-8-sig") as fp:
img_mapping = json.load(fp)
else:
img_mapping = dict() # k: prod_link, v: img_id
# create env
env_path = r"./res3/img_html_source"
if not os.path.exists(env_path):
os.mkdir(env_path)
# read product urls from existing tier-2 csv
csv_path = "./res3/tier_2_modified.csv"
prod_data = pd.read_csv(csv_path)
#!/usr/bin/python
# _____________________________________________________________________________
# ----------------
# import libraries
# ----------------
# standard libraries
# -----
import torch
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import json
# utilities
# -----
# custom functions
# -----
def show_batch(sample_batched):
"""
sample_batched: Tuple[torch.tensor, torch.tensor] -> None
show_batch takes a contrastive sample sample_batched and plots
an overview of the batch
"""
grid_border_size = 2
nrow = 10
batch_1 = sample_batched[0][0][:, 0:, :, :]
batch_2 = sample_batched[0][1][:, 0:, :, :]
difference = np.abs(batch_1 - batch_2)
titles = ["first contrast", "second contrast", "difference"]
fig, axes = plt.subplots(1, 3, figsize=(2 * 6.4, 4.8))
for (i, batch) in enumerate([batch_1, batch_2, difference]):
ax = axes[i]
grid = utils.make_grid(batch, nrow=nrow, padding=grid_border_size)
ax.imshow(grid.numpy().transpose((1, 2, 0)))
ax.set_title(titles[i])
ax.axis("off")
plt.show()
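# Illustrative usage (placeholder names; assumes a contrastive dataset instance
# like the ones defined further below):
#
#     loader = DataLoader(dataset, batch_size=100, shuffle=True)
#     for sample_batched in loader:
#         show_batch(sample_batched)
#         break  # only visualise the first batch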
# ----------------
# custom classes
# ----------------
# custom CLTT dataset superclass (abstract)
# -----
class CLTTDataset(Dataset):
"""
CLTTDataset is an abstract class implementing all the necessary methods
to sample data according to the CLTT approach. CLTTDataset itself
should not be instantiated as a standalone class, but should be
inherited from and abstract methods should be overwritten
"""
def __init__(self, root, train=True, transform=None, target_transform=None,
n_fix=5, contrastive=True, sampling_mode='uniform', shuffle_object_order=True, circular_sampling=True, buffer_size=12096):
"""
__init__ initializes the CLTTDataset Class, it defines class-wide
constants and builds the registry of files and the data buffer
root:str path to the dataset directory
train:bool training set instead of testing
transform:torchvision.transform
target_transform:torchvision.transform
n_fix:int for deterministic n_fix, float for probabilistic
contrastive:bool contrastive dataset mode
sampling_mode:str how the buffer gets built
circular_sampling:bool make the first object the last object
buffer_size:int approximate buffersize
"""
super().__init__()
self.train = train
self.sampling_mode = sampling_mode
self.shuffle_object_order = shuffle_object_order
self.buffer_size = buffer_size
self.n_fix = n_fix
self.tau_plus = 1
self.tau_minus = 0 # contrasts from the past (experimental)
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.contrastive = contrastive
self.circular_sampling = circular_sampling
self.get_dataset_properties()
self.registry = self.build_registry(train)
if self.contrastive:
self.buffer = self.build_buffer(self.registry, self.sampling_mode, self.n_fix, self.shuffle_object_order, approx_size=self.buffer_size)
else:
# if used in non-contrastive mode the sampler just samples from all data
self.buffer = self.registry
pass
def __len__(self):
"""
__len__ defines the length of the dataset and indirectly
defines how many samples can be drawn from the dataset
in one epoch
"""
length = len(self.buffer)
return length
def get_dataset_properties(self):
"""
get_dataset_properties has to be defined for each dataset
it stores number of objects, number of classes, a list of
strings with labels
"""
# basic properties (need to be there)
self.n_objects = 3 # number of different objects >= n_classes
self.n_classes = 3 # number of different classes
self.labels = [
"A",
"B",
"C",
]
self.n_views_per_object = 10 # how many overall views of each object
self.subdirectory = '/dataset_name/' # where is the dataset
self.name = 'dataset name' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
self.custom_property = ['one', 'two', 'three']
raise Exception("Calling abstract method, please inherit \
from the CLTTDataset class and reimplement this method") # pseudoclass
pass
def __getitem__(self, idx):
"""
__getitem__ is a method that defines how one sample of the
dataset is drawn
"""
if self.contrastive:
image, label = self.get_single_item(idx)
augmentation, _ = self.sample_contrast(idx)
if self.transform:
image, augmentation = self.transform(
image), self.transform(augmentation)
if self.target_transform:
label = self.target_transform(label)
output = ([image, augmentation], label)
else:
image, label = self.get_single_item(idx)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
output = image, label
return output
def sample_contrast(self, chosen_index):
"""
given index chosen_index, sample a corresponding contrast close in time
"""
chosen_time = self.buffer.iloc[chosen_index]["time_idx"]
possible_indices = self.buffer[
(self.buffer["time_idx"].between(chosen_time - self.tau_minus, chosen_time + self.tau_plus)) & (
self.buffer["time_idx"] != chosen_time)].index
# sampling at the end of the buffer
if (chosen_time + self.tau_plus) > self.buffer.time_idx.max():
if self.circular_sampling:
also_possible = self.buffer[
(self.buffer["time_idx"].between(self.buffer.time_idx.min(), (
chosen_time + self.tau_plus - 1) - self.buffer.time_idx.max())) & (
self.buffer["time_idx"] != chosen_time)].index
else:
also_possible = self.buffer[self.buffer["time_idx"] == chosen_time].index
possible_indices = possible_indices.union(also_possible)
# sampling at the beginning of the buffer
if (chosen_time - self.tau_minus) < self.buffer.time_idx.min():
if self.circular_sampling:
also_possible = self.buffer[
(self.buffer["time_idx"].between(self.buffer.time_idx.max() + (chosen_time - self.tau_minus) + 1,
self.buffer.time_idx.max())) & (
self.buffer["time_idx"] != chosen_time)].index
else:
also_possible = self.buffer[self.buffer["time_idx"] == chosen_time].index
possible_indices = possible_indices.union(also_possible)
chosen_index = np.random.choice(possible_indices)
return self.get_single_item(chosen_index)
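# Intuition for the wrap-around above: with tau_plus == 1 and tau_minus == 0,
# an anchor at the last time index has no later frame, so circular_sampling
# offers the first time index of the buffer as its temporal contrast instead.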
def get_single_item(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
elif isinstance(idx, pd.core.indexes.numeric.Int64Index):
idx = idx[0]
path_to_file = self.buffer.loc[idx, "path_to_file"]
if isinstance(path_to_file, pd.core.series.Series):
path_to_file = path_to_file.item()
image = Image.open(path_to_file)
obj_info = self.buffer.iloc[idx, 1:].to_dict()
label = self.buffer.loc[idx, "label"]
return image, label
def build_registry(self, train):
"""
build a registry of all image files
"""
path_list = []
object_list = []
label_list = []
time_list = []
d = self.root + self.subdirectory + 'train/' if train else self.root + self.subdirectory + 'test/'
# have an ordered list
list_of_files = os.listdir(d)
list_of_files.sort()
for timestep, path in enumerate(list_of_files):
full_path = os.path.join(d, path)
if os.path.isfile(full_path):
path_list.append(full_path)
object_list.append(timestep // self.n_views_per_object)
label_list.append(timestep // self.n_views_per_object)
time_list.append(timestep % self.n_views_per_object)
tempdict = {'path_to_file': path_list, 'label': label_list, 'object_nr': object_list, 'time_idx': time_list}
dataframe = pd.DataFrame(tempdict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
def build_buffer(self, registry, sampling_mode, n_fix, shuffle_object_order, approx_size):
"""
build_buffer builds a buffer from all data that is available
according to the sampling mode specified. Default method just
returns the whole registry
"""
# if n_fix is a probability, then get an expected value of the number of views
expected_views = n_fix if n_fix >= 1 else self.expected_n(n_fix)
object_order = np.arange(self.n_objects)
if shuffle_object_order:
np.random.shuffle(object_order)
if sampling_mode == 'window':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_index = np.random.choice(np.arange(0, self.n_views_per_object - n_views))
streambits.append(registry[registry.object_nr == o][
registry.time_idx.between(chosen_index, chosen_index + n_views - 1)])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'uniform':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_indexs = np.random.choice(np.arange(0, self.n_views_per_object), n_views)
streambits.append(registry[registry.object_nr == o].iloc[chosen_indexs])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'randomwalk':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
streambits.append(registry.iloc[self.get_N_randomwalk_steps(n_views, o)])
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
else:
print("[INFO] Warning, no sampling mode specified, defaulting to \
whole dataset")
timestream = registry #if no mode, then return the whole registry
return timestream
def refresh_buffer(self):
"""
refresh buffer takes an CLTTDataset class and refreshes its own buffer
given the registry
"""
self.buffer = self.build_buffer(self.registry, self.sampling_mode, self.n_fix, self.shuffle_object_order, self.buffer_size)
pass
def get_N_randomwalk_steps(self, N, object_nr):
"""
Get index values of N random walk steps of a object specified by "object_nr".
"""
raise Exception("Calling abstract method, please inherit \
from the CLTTDataset class and reimplement this method") # pseudoclass
pass
def expected_n(self, probability):
"""
expected_n takes a float probability between 0 and 1
and returns the expected value of the number of fixations
"""
result = (1-probability)*(probability)/(1-(probability))**2 + 1
return result
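# Intuition: get_n below draws the number of fixations from a geometric
# distribution, so the expression above simplifies to 1 / (1 - probability);
# e.g. for probability 0.8 the expected number of views is 1 / 0.2 = 5.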
def get_n(self, input):
"""
get_n takes a float probability input between 0 and 1
and returns n fixations according to probability
if input >= 1 it just returns its argument
"""
if input >= 1:
return input
else:
result = 1 # make sure that you switch to the next object once
while input > np.random.random():
result += 1
return result
# datasets (CLTTDataset subclasses)
# -----
# TODO: Rewrite MiyashitaDataset to be compatible with probabilistic n_fix
class MiyashitaDataset(CLTTDataset):
"""
MiyashitaDataset is a dataset inspired by the work of
Miyashita, 1988. It is comprised of a set of different fractal patterns that
are presented in a specific order so that they become associated.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 100 # number of different objects >= n_classes
self.n_classes = 100 # number of different classes
self.labels = [str(i) for i in range(self.n_classes)]
self.n_views_per_object = 100 if (self.train and self.contrastive) else 1
#self.n_fix # how many overall views of each object
self.subdirectory = '/fractals100_64x64/' # where is the dataset
self.name = 'Miyashita Fractals' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
# for Miyashita every mode is the same
# that means we do not need to reimplement get_n_randomwalk_steps
# and can just fix the sampling mode
self.sampling_mode = "uniform" if (self.train and self.contrastive) else "" # overwrite sampling mode
self.basic_transform = transforms.RandomAffine(
degrees=(-10, 10),
translate=(0.15, 0.15),
scale=(0.9, 1.0))
# add the basic transform the the regular transform for training
if (self.train and self.contrastive):
self.transform = transforms.Compose([
self.basic_transform,
self.transform,
])
pass
def build_registry(self, train):
"""
Reimplementation of the build_registry method, because Miyashita
Fractals have no testset and the in-class variability is generated
virtually instead of having multiple pictures
"""
path_list = []
object_list = []
label_list = []
time_list = []
e = 0
d = self.root + self.subdirectory # there is no fractals testset
# have an ordered list
list_of_files = os.listdir(d)
list_of_files.sort()
for o, path in enumerate(list_of_files):
full_path = os.path.join(d, path)
if os.path.isfile(full_path):
repetitions = self.n_views_per_object
# repeat the same picture n_fix times
for timestep in range(repetitions):
path_list.append(full_path)
time_list.append(timestep + e * self.n_views_per_object)
object_list.append(o)
label_list.append(o)
e += 1
temporary_dict = {'path_to_file': path_list,
'label': label_list,
'object_nr': object_list,
'time_idx': time_list}
dataframe = pd.DataFrame(temporary_dict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
class TDWDataset(CLTTDataset):
"""
The ThreeDWorld Dataset by <NAME> is
comprised of 1008 views around 12 distinct objects rendered
in the TDW environment
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 12 # number of different objects >= n_classes
self.n_classes = 12 # number of different classes
self.labels = [
"cup",
"comb",
"scissor",
"hammer",
"book",
"calculator",
"goblet",
"candle",
"headphones",
"screwdriver",
"cassette",
"bottle",
]
delta_phi = 10
self.phis = np.arange(0, 360, delta_phi)
delta_theta = 10
self.thetas = np.arange(10, 80, delta_theta)
delta_r = 0.1
self.rs = np.arange(0.3, 0.7, delta_r)
self.n_views_per_object = len(self.phis) * len(self.thetas) * len(self.rs) # how many overall views of each object
self.subdirectory = '/spherical_photoreal_64x64_DoF/' # where is the dataset
self.name = 'ThreeDWorld Objects' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
pass
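# Sanity check on the view count (follows from the grids defined above):
# 36 azimuth angles (phi) x 7 elevations (theta) x 4 radii = 1008 views per
# object, matching the number quoted in the class docstring.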
def get_N_randomwalk_steps(self, N, object_nr):
"""
Get index values of N random walk steps of a object specified by "object_nr".
"""
def get_registry_index(r, theta, phi):
"""
helper function to get index given a coordinate tuple,
i.e. r, theta and phi value
"""
ix = r * (len(self.thetas) * len(self.phis)) + theta * len(self.phis) + phi
return ix
index = []
# Possible values for r,theta and phi
r = len(self.rs)
theta = len(self.thetas)
phi = len(self.phis)
# select random start values for r,theta and phi
current_r = np.random.randint(0, r - 1)
current_theta = np.random.randint(0, theta - 1)
current_phi = np.random.randint(0, phi - 1)
for i in range(N):
while True:
# 6 possible directions in which to go from the current position
# Possible steps: +/-r, +/-Phi, +/-Theta
rand = np.random.randint(low=0, high=6)
# Check whether the chosen step direction is valid before taking it.
if (rand == 0) & (current_r < r - 1):
current_r += 1
break
if (rand == 1) & (current_r > 0):
current_r -= 1
break
if (rand == 2) & (current_theta < theta - 1):
current_theta += 1
break
if (rand == 3) & (current_theta > 0):
current_theta -= 1
break
if (rand == 4) & (current_phi < phi - 1):
current_phi += 1
break
if (rand == 5) & (current_phi > 0):
current_phi -= 1
break
# transform the r, theta, phi values
# into a flat index between 0 and 1007 (n_views_per_object - 1)
ix = get_registry_index(
current_r, current_theta, current_phi)
index.append(ix)
index = np.array(index)
# to get index values for object "object_nr", the values are shifted
index += self.n_views_per_object * object_nr
return index
def additional_metadata(self):
# hacky way to get some metadata, to be revised
phi_angle_list = []
theta_angle_list = []
radius_list = []
for o in range(self.n_classes):
for r in self.rs:
for theta in self.thetas:
for phi in self.phis:
phi_angle_list.append(phi)
theta_angle_list.append(theta)
radius_list.append(r)
tempdict = {'phi': phi_angle_list, 'theta': theta_angle_list, 'radius': radius_list}
dataframe = pd.DataFrame(tempdict)
self.registry = pd.merge(self.registry, dataframe, left_index=True, right_index=True)
pass
class COIL100Dataset(CLTTDataset):
"""
COIL100Dataset wraps the Columbia Object Image Library (COIL-100) from the
work of Sameer, Shree, and Hiroshi, 1996. It comprises color images of
100 objects, with 72 views per object.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 100 # number of different objects >= n_classes
self.n_classes = 100 # number of different classes
self.labels = [str(i) for i in range(self.n_classes)]
if self.train:
self.n_views_per_object = 54 # number of views of each object in the training set
else:
self.n_views_per_object = 18 # number of views of each object in the test set
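# added note: COIL-100 provides 72 views per object (5-degree rotation steps),
# so the 54/18 numbers above correspond to a 75/25 train/test split of those views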
self.subdirectory = '/coil100_128x128/' # where is the dataset
self.name = 'Columbia University Image Library' # name of the dataset
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
pass
def get_N_randomwalk_steps(self, N, object_nr):
"""
Get index values of N random walk steps of an object specified by "object_nr".
"""
index = []
current_idx = np.random.randint(0, self.n_views_per_object - 1)
for i in range(N):
while True:
# 2 possible directions in which to go from the current position
# Possible steps: -1 (previous view) or +1 (next view)
rand = np.random.randint(low=0, high=2)
if (rand == 0) & (current_idx > 0):
current_idx -= 1
break
if (rand == 1) & (current_idx < self.n_views_per_object - 1):
current_idx += 1
break
index.append(current_idx)
index = np.array(index)
index += self.n_views_per_object * object_nr
return index
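# illustrative usage (added, hypothetical output): for object_nr=2 with 54 views
# per object, get_N_randomwalk_steps(4, 2) could return e.g. array([130, 131, 130, 129]),
# a reflecting 1D walk over that object's index block 108..161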
def build_registry(self, train):
"""
build a registry of all image files
"""
path_list = []
object_list = []
label_list = []
time_list = []
# d = self.root + self.subdirectory
d = self.root + self.subdirectory + 'train/' if train else self.root + self.subdirectory + 'test/'
# have an ordered list
list_of_files = os.listdir(d)
list_of_files.sort()
for timestep, path in enumerate(list_of_files):
full_path = os.path.join(d, path)
if os.path.isfile(full_path):
path_list.append(full_path)
object_list.append(timestep // self.n_views_per_object)
label_list.append(timestep // self.n_views_per_object)
time_list.append(timestep % self.n_views_per_object)
tempdict = {'path_to_file': path_list, 'label': label_list, 'object_nr': object_list, 'time_idx': time_list}
dataframe = pd.DataFrame(tempdict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
class RoadDefectsDataset(CLTTDataset):
"""
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def get_dataset_properties(self):
# basic properties (need to be there)
self.n_objects = 12 # number of different objects >= n_classes
self.n_classes = 12 # number of different classes
self.labels = ['Affaissement de rive',
'Affaissement hors rive',
'Arrachement',
'Autres réparations',
'Faïençage',
'Fissure longitudinale',
'Fissure transversale',
'Glaçage - Ressuage',
'Réparation en BB sur découpe',
'Fissure thermique',
'Orniérage',
'Background'
]
self.subdirectory = '/home/finn/DATASET/CD33/' # where is the dataset
self.name = 'Road defects' # name of the dataset
self.label_file_train = 'data/defects_train.json'
self.label_file_test = 'data/defects_val_small.json'
# custom properties (optional, dataset specific)
# (anything you would want to have available in self)
pass
def get_N_randomwalk_steps(self, N, object_nr, n_views_per_object):
"""
Get index values of N random walk steps of an object specified by "object_nr".
"""
index = []
current_idx = np.random.randint(0, n_views_per_object - 1)
for i in range(N):
while True:
# 2 possible directions in which to go from the current position
# Possible steps: -1 (previous view) or +1 (next view)
rand = np.random.randint(low=0, high=2)
if (rand == 0) & (current_idx > 0):
current_idx -= 1
break
if (rand == 1) & (current_idx < n_views_per_object - 1):
current_idx += 1
break
index.append(current_idx)
index = np.array(index)
index += n_views_per_object * object_nr
return index
def build_registry(self, train):
"""
build a registry of all image files
"""
path_list = []
object_list = []
label_list = []
time_list = []
# d = self.root + self.subdirectory
# d = self.subdirectory + 'train/' if train else self.subdirectory + 'test/'
d = self.subdirectory
# load labels
if self.train:
with open(self.label_file_train, 'r') as f_in:
labels = json.load(f_in)
else:
with open(self.label_file_test, 'r') as f_in:
labels = json.load(f_in)
# dict to count instances of each class/object
time_dict = {l:0 for l in self.labels}
for file_name, l_list in labels.items():
full_path = os.path.join(d, file_name)
if os.path.isfile(full_path):
for label in set(l_list):
path_list.append(full_path)
object_list.append(self.labels.index(label))
label_list.append(self.labels.index(label))
time_list.append(time_dict[label])
time_dict[label] += 1
tempdict = {'path_to_file': path_list, 'label': label_list, 'object_nr': object_list, 'time_idx': time_list}
dataframe = pd.DataFrame(tempdict)
dataframe.sort_values(by=['object_nr', 'time_idx'], inplace=True)
dataframe.reset_index(drop=True, inplace=True)
return dataframe
def build_buffer(self, registry, sampling_mode, n_fix, shuffle_object_order, approx_size):
"""
build_buffer builds a buffer (timestream) from all available data
according to the specified sampling mode ('window' or 'uniform'),
overriding the default method, which simply returns the whole registry
"""
# if n_fix is a probability, then get an expected value of the number of views
expected_views = n_fix if n_fix >= 1 else self.expected_n(n_fix)
object_order = np.arange(self.n_objects)
if shuffle_object_order:
np.random.shuffle(object_order)
if sampling_mode == 'window':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_index = np.random.choice(np.arange(0, registry.object_nr.value_counts()[o] - n_views))
streambits.append(registry[registry.object_nr == o][
registry.time_idx.between(chosen_index, chosen_index + n_views - 1)])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = pd.concat(streambits, ignore_index=True)
timestream.time_idx = np.arange(len(timestream.time_idx))
elif sampling_mode == 'uniform':
streambits = []
for _ in range(approx_size // (round(expected_views) * self.n_objects)):
for o in object_order:
n_views = self.get_n(n_fix) # get the n_fix for each object
chosen_indexs = np.random.choice(np.arange(0, registry.object_nr.value_counts()[o]), n_views)
streambits.append(registry[registry.object_nr == o].iloc[chosen_indexs])
if shuffle_object_order:
np.random.shuffle(object_order)
timestream = | pd.concat(streambits, ignore_index=True) | pandas.concat |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
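# added note: 86400000000000 ns == exactly one day; day_dt is used below to check
# the frequency inferred from the daily date index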
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
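# added note: group_by maps columns 'a' and 'b' to group 'g1' and column 'c' to 'g2';
# the grouped statistics below (e.g. totals of 4 and 1) rely on this grouping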
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
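# added note: statistical sanity check -- accumulate the entry/exit positions over
# many random draws; with entry_wait=exit_wait=2 the 2*n alternating signals should,
# on average, fall uniformly spaced over the 1000-step range, so each accumulated
# position is required to lie within the bounds of its expected bin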
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
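# added note: the helper relies on generate_ohlc_stop_exits filling the requested
# out_dict entries in place, so it returns the exit mask (or the (entries, exits)
# tuple when chain=True) together with the recorded stop price and stop type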
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
def test_total_partitions(self):
assert mask['a'].vbt.signals.total_partitions() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(),
pd.Series([2, 2, 1], index=mask.columns, name='total_partitions')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions')
)
def test_partition_rate(self):
assert mask['a'].vbt.signals.partition_rate() == 1.0
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(),
pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(group_by=group_by),
pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate')
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index',
'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min',
'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions',
'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object')
pd.testing.assert_series_equal(
mask.vbt.signals.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
1.6666666666666667,
33.333333333333336,
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
1.6666666666666667,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), 5, 2, 40.0,
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-04 00:00:00'), -0.25, 3.0,
3.0, 3.0, np.nan, 2, 100.0, 1.0, 1.0, 1.0, 0.0, 3.0, 3.0, 3.0, np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(other=mask['b'], from_other=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
0,
0.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'Total Overlapping',
'Overlapping Rate [%]', 'First Index', 'Last Index',
'Norm Avg Index [-1, 1]', 'Distance <- Other: Min',
'Distance <- Other: Max', 'Distance <- Other: Mean',
'Distance <- Other: Std', 'Total Partitions', 'Partition Rate [%]',
'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object'),
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
4,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
4,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
| pd.Timedelta('0 days 00:00:00') | pandas.Timedelta |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
    Maps the requested year onto the spreadsheet column label ("year_1" ...
    "year_5") and checks that the year falls within the span covered by the
    source file.
    :param years: string, year span with a hyphen, e.g. "2014-2018"
    :param current_year_str: string, year of interest
    :return: string, column label for that year (None if the year is out of range)
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
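# Illustrative sketch (hypothetical example function, not used by flowsa):
# a "YYYY-YYYY" span covers five report columns, so the requested year maps
# onto the spreadsheet headers "year_1" ... "year_5" used throughout this file.
def _example_usgs_myb_year():  # illustrative only
    assert usgs_myb_year("2014-2018", "2014") == "year_1"
    assert usgs_myb_year("2014-2018", "2016") == "year_3"
    assert usgs_myb_year("2014-2018", "2018") == "year_5"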
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
    :return: string, human-readable name derived from the source string
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
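# Illustrative sketch (hypothetical example function, not used by flowsa):
# source names follow the "USGS_MYB_<CamelCaseName>" pattern; the camel-case
# segment is split at capital letters and lower-cased.
def _example_usgs_myb_name():  # illustrative only
    assert usgs_myb_name("USGS_MYB_SodaAsh") == "soda ash"
    assert usgs_myb_name("USGS_MYB_IronOre") == "iron ore"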
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
    :return: dict, field values shared by all USGS_MYB parsers
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
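# Illustrative sketch (hypothetical example function, not used by flowsa):
# every parse function below starts from this shared dict and then fills in
# the source-specific fields (SourceName, Year, Unit, FlowName, FlowAmount).
def _example_usgs_myb_static_variables():  # illustrative only
    data = usgs_myb_static_variables()
    assert data["Class"] == "Geological"
    assert data["Location"] == US_FIPS
    assert data["Compartment"] == "ground"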
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
    :param value_string: string to strip digits from
    :return: string with all digits removed
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
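# Illustrative sketch (hypothetical example function, not used by flowsa):
# numeric footnote markers on row labels (e.g. "Exports2", "Quantity3") are
# stripped so the bare label can be matched or reported.
def _example_usgs_myb_remove_digits():  # illustrative only
    assert usgs_myb_remove_digits("Exports2") == "Exports"
    assert usgs_myb_remove_digits("Quantity3") == "Quantity"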
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
    :param args: dictionary, arguments specified when running
        flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex()
df_data_three = df_data_three.reset_index()
del df_data_three["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
del df_data_three[col]
frames = [df_data_one, df_data_two, df_data_three]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_boron_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "B2O3 content" or \
df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if df.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif df.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_chromium_call(*, resp, year, **_):
    """
    Convert response for calling url to pandas dataframe,
    begin parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_chromium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_clay_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex()
df_data_ball = df_data_ball.reset_index()
del df_data_ball["index"]
df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
df_data_bentonite = pd.DataFrame(
df_raw_data_bentonite.loc[28:28]).reindex()
df_data_bentonite = df_data_bentonite.reset_index()
del df_data_bentonite["index"]
df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex()
df_data_common = df_data_common.reset_index()
del df_data_common["index"]
df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex()
df_data_fire = df_data_fire.reset_index()
del df_data_fire["index"]
df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex()
df_data_fuller = df_data_fuller.reset_index()
del df_data_fuller["index"]
df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex()
df_data_kaolin = df_data_kaolin.reset_index()
del df_data_kaolin["index"]
df_raw_data_export = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
df_data_export = df_data_export.reset_index()
del df_data_export["index"]
df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
df_data_import = df_data_import.reset_index()
del df_data_import["index"]
df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_ball["type"] = "Ball clay"
df_data_bentonite["type"] = "Bentonite"
df_data_common["type"] = "Common clay"
df_data_fire["type"] = "Fire clay"
df_data_fuller["type"] = "Fuller’s earth"
df_data_kaolin["type"] = "Kaolin"
df_data_export["type"] = "export"
df_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in df_data_import.columns:
if col not in col_to_use:
del df_data_import[col]
del df_data_export[col]
for col in df_data_ball.columns:
if col not in col_to_use:
del df_data_ball[col]
del df_data_bentonite[col]
del df_data_common[col]
del df_data_fire[col]
del df_data_fuller[col]
del df_data_kaolin[col]
frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_clay_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificially activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["type"].strip() == "import":
product = "imports"
elif df.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(df.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
df.iloc[index]["type"].strip() + " " + product
data["Description"] = df.iloc[index]["type"].strip()
data["ActivityProducedBy"] = df.iloc[index]["type"].strip()
else:
data['FlowName'] = \
df.iloc[index]["Production"].strip() + " " + product
data["Description"] = df.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
df.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)" or \
str(df.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_cobalt_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_cobalt_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_copper_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_copper_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_diatomite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 10:
df_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_diatomite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for consumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption2":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_feldspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_feldspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:4":
prod = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif df.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_fluorspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
if year in YEARS_COVERED['fluorspar_inports']:
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8]).reindex()
df_data_three = pd.DataFrame(df_raw_data_three.loc[19:19]).reindex()
df_data_four = pd.DataFrame(df_raw_data_four.loc[11:11]).reindex()
if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "not_1", "space_2",
"not_2", "space_3", "not_3", "space_4",
"not_4", "space_5", "year_4", "space_6",
"year_5"]
if len(df_data_three.columns) == 9:
df_data_three.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
df_data_four.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
    if len(df_data_one.columns) == 13:
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['fluorspar'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
if year in YEARS_COVERED['fluorspar_inports']:
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
for col in df_data_three.columns:
if col not in col_to_use:
del df_data_three[col]
for col in df_data_four.columns:
if col not in col_to_use:
del df_data_four[col]
df_data_one["type"] = "data_one"
if year in YEARS_COVERED['fluorspar_inports']:
# aluminum fluoride
# cryolite
df_data_two["type"] = "data_two"
df_data_three["type"] = "Aluminum Fluoride"
df_data_four["type"] = "Cryolite"
frames = [df_data_one, df_data_two, df_data_three, df_data_four]
else:
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_fluorspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid",
"Metallurgical", "Production"]
prod = ""
name = usgs_myb_name(source)
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:3":
prod = "exports"
des = name
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "imports"
des = name
elif df.iloc[index]["Production"].strip() == "Fluorosilicic acid:":
prod = "production"
des = "Fluorosilicic acid:"
if str(df.iloc[index]["type"]).strip() == "data_two":
prod = "imports"
des = df.iloc[index]["Production"].strip()
elif str(df.iloc[index]["type"]).strip() == \
"Aluminum Fluoride" or \
str(df.iloc[index]["type"]).strip() == "Cryolite":
prod = "imports"
des = df.iloc[index]["type"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year)
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gallium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 11:
for x in range(11, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gallium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gallium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production, primary crude", "Metal"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, primary crude":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
if str(df.iloc[index][col_name]).strip() == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_garnet_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:5]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 13:
for x in range(13, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['garnet'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_garnet_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption: 3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Crude production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['garnet'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gold_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 13:
df_data.columns = ["Production", "Space", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gold'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gold_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports, refined bullion",
"Imports for consumption, refined bullion"]
dataframe = pd.DataFrame()
product = "production"
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
elif df.iloc[index]["Production"].strip() == \
"Exports, refined bullion":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, refined bullion":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gold'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_graphite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_graphite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
    row_to_use = ["Quantiy", "Quantity"]  # "Quantiy" kept as-is; it appears to match the row label spelling in the source workbook
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gypsum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gypsum'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_gypsum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gypsum'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iodine_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:10]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "unit", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['iodine'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iodine_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Quantity, for consumption", "Exports2"]
    product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:2":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iron_ore_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:25]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['ironore'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iron_ore_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Gross weight", "Quantity"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data['FlowName'] = "Iron Ore " + product
data["Description"] = "Iron Ore"
data["ActivityProducedBy"] = "Iron Ore"
col_name = usgs_myb_year(YEARS_COVERED['ironore'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_kyanite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:13]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['kyanite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_kyanite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Exports of kyanite concentrate:3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, all kyanite minerals:3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lead_url_helper(*, year, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
if int(year) < 2013:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2016-lead.xls')
elif int(year) < 2014:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2017-lead.xls')
else:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx')
url = build_url
return [url]
def usgs_lead_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[8:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
elif int(year) > 2013:
modified_sy = "2014-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
else:
col_to_use.append(usgs_myb_year(YEARS_COVERED['lead'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_lead_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Primary lead, refined content, "
"domestic ores and base bullion",
"Secondary lead, lead content",
"Lead ore and concentrates", "Lead in base bullion"]
import_export = ["Exports, lead content:",
"Imports for consumption, lead content:"]
dataframe = pd.DataFrame()
product = "production"
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() in import_export:
if df.iloc[index]["Production"].strip() == \
"Exports, lead content:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, lead content:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["ActivityProducedBy"] = df.iloc[index]["Production"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_name = usgs_myb_year(modified_sy, year)
elif int(year) > 2013:
modified_sy = "2014-2018"
col_name = usgs_myb_year(modified_sy, year)
else:
col_name = usgs_myb_year(YEARS_COVERED['lead'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lime_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[16:16]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data_two.loc[28:32]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 12:
for x in range(12, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lime'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lime_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total", "Quantity"]
import_export = ["Exports:7", "Imports for consumption:7"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:7":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:7":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['lime'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
if product.strip() == "Total":
data['FlowName'] = name + " " + prod
elif product.strip() == "Quantity":
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lithium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lithium'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lithium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Exports3", "Imports3", "Production"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['lithium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == "Imports3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_magnesium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['magnesium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_magnesium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary", "Primary", "Exports", "Imports for consumption"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary" or \
df.iloc[index]["Production"].strip() == "Primary":
product = "production" + " " + \
df.iloc[index]["Production"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_manganese_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['manganese'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_manganese_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_ma_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_data = pd.DataFrame(df_raw_data.loc[6:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 9:
for x in range(9, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 9:
        df_data.columns = ["Product", "space_1", "quality_year_1", "space_2",
                           "value_year_1", "space_3",
                           "quality_year_2", "space_4", "value_year_2"]
col_to_use = ["Product"]
col_to_use.append("quality_"
+ usgs_myb_year(YEARS_COVERED['manufacturedabrasive'],
year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_ma_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Silicon carbide"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
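            # Strip any trailing footnote digits from the product label
            # before matching against row_to_use.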
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Product"].strip().translate(remove_digits)
if product in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data['FlowName'] = "Silicon carbide"
data["ActivityProducedBy"] = "Silicon carbide"
data["Unit"] = "Metric Tons"
col_name = ("quality_"
+ usgs_myb_year(
YEARS_COVERED['manufacturedabrasive'], year))
col_name_array = col_name.split("_")
data["Description"] = product + " " + col_name_array[0]
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_mica_call(*, resp, source, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:6]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
name = usgs_myb_name(source)
des = name
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['mica'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_mica_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['mica'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Production, sold or used by producers:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_molybdenum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['molybdenum'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_molybdenum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['molybdenum'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_nickel_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T10')
df_data_1 = pd.DataFrame(df_raw_data.loc[36:36]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_2 = pd.DataFrame(df_raw_data_two.loc[11:16]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 11:
for x in range(11, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2.columns) == 12:
df_data_2.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['nickel'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_nickel_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ores and concentrates3",
"United States, sulfide ore, concentrate"]
import_export = ["Exports:", "Imports for consumption:"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['nickel'], year)
if product.strip() == \
"United States, sulfide ore, concentrate":
data["Description"] = \
"United States, sulfide ore, concentrate Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
elif product.strip() == "Ores and concentrates":
data["Description"] = "Ores and concentrates Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(4)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_niobium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:19]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 13:
for x in range(13, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit_1", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['niobium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_niobium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total imports, Nb content", "Total exports, Nb content"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_peat_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
"""Calls the excel sheet for nickel and removes extra columns"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:18]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 12:
for x in range(12, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['peat'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_peat_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['peat'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "import"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_perlite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:6]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data_one.loc[20:25]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
df_data_two.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['perlite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
frames = [df_data_one, df_data_two]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_perlite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Mine production2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['perlite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Mine production2":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "import"
elif df.iloc[index]["Production"].strip() == "Exports:3":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_phosphate_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:9]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data_one.loc[19:21]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
if len(df_data_one.columns) > 12:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_1", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
df_data_two.columns = ["Production", "unit", "space_1", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['phosphate'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
frames = [df_data_one, df_data_two]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_phosphate_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Gross weight", "Quantity, gross weight"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['phosphate'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Marketable production:":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "import"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_platinum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
    df_data_1 = pd.DataFrame(df_raw_data.loc[4:9])
from __future__ import print_function
import unittest
from unittest import mock
from io import BytesIO, StringIO
import random
import six
import os
import re
import logging
import numpy as np
import pandas as pd
from . import utils as test_utils
import dataprofiler as dp
from dataprofiler.profilers.profile_builder import StructuredColProfiler, \
UnstructuredProfiler, UnstructuredCompiler, StructuredProfiler, Profiler
from dataprofiler.profilers.profiler_options import ProfilerOptions, \
StructuredOptions, UnstructuredOptions
from dataprofiler.profilers.column_profile_compilers import \
ColumnPrimitiveTypeProfileCompiler, ColumnStatsProfileCompiler, \
ColumnDataLabelerCompiler
from dataprofiler import StructuredDataLabeler, UnstructuredDataLabeler
from dataprofiler.profilers.helpers.report_helpers import _prepare_report
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def setup_save_mock_open(mock_open):
mock_file = BytesIO()
mock_file.close = lambda: None
mock_open.side_effect = lambda *args: mock_file
return mock_file
class TestStructuredProfiler(unittest.TestCase):
@classmethod
def setUp(cls):
test_utils.set_seed(seed=0)
@classmethod
def setUpClass(cls):
test_utils.set_seed(seed=0)
cls.input_file_path = os.path.join(
test_root_path, 'data', 'csv/aws_honeypot_marx_geo.csv'
)
cls.aws_dataset = pd.read_csv(cls.input_file_path)
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
with test_utils.mock_timeit():
cls.trained_schema = dp.StructuredProfiler(
cls.aws_dataset, len(cls.aws_dataset), options=profiler_options)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_bad_input_data(self, *mocks):
allowed_data_types = (r"\(<class 'list'>, "
r"<class 'pandas.core.series.Series'>, "
r"<class 'pandas.core.frame.DataFrame'>\)")
bad_data_types = [1, {}, np.inf, 'sdfs']
for data in bad_data_types:
with self.assertRaisesRegex(TypeError,
r"Data must either be imported using "
r"the data_readers or using one of the "
r"following: " + allowed_data_types):
StructuredProfiler(data)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_list_data(self, *mocks):
data = [[1, 1],
[None, None],
[3, 3],
[4, 4],
[5, 5],
[None, None],
[1, 1]]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual("<class 'list'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0, 1], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# validates the sample out maintains the same visual data format as the
# input.
self.assertListEqual(['5', '1', '1', '3', '4'],
profiler.profile[0].sample)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_pandas_series_data(self, *mocks):
data = pd.Series([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data)
# test properties
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual([0], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# test properties when series has name
data.name = 'test'
profiler = dp.StructuredProfiler(data)
self.assertEqual(
"<class 'pandas.core.series.Series'>", profiler.file_type)
self.assertIsNone(profiler.encoding)
self.assertEqual(2, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(7, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertListEqual(['test'], list(profiler._col_name_to_idx.keys()))
self.assertIsNone(profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._merge_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
def test_add_profilers(self, *mocks):
data = pd.DataFrame([1, None, 3, 4, 5, None, 1])
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data[:2])
profile2 = dp.StructuredProfiler(data[2:])
# test incorrect type
with self.assertRaisesRegex(TypeError,
'`StructuredProfiler` and `int` are '
'not of the same profiler type.'):
profile1 + 3
# test mismatched profiles
profile2._profile.pop(0)
profile2._col_name_to_idx.pop(0)
with self.assertRaisesRegex(ValueError,
"Cannot merge empty profiles."):
profile1 + profile2
# test mismatched profiles due to options
profile2._profile.append(None)
profile2._col_name_to_idx[0] = [0]
with self.assertRaisesRegex(ValueError,
'The two profilers were not setup with the '
'same options, hence they do not calculate '
'the same profiles and cannot be added '
'together.'):
profile1 + profile2
# test success
profile1._profile = [1]
profile1._col_name_to_idx = {"test": [0]}
profile2._profile = [2]
profile2._col_name_to_idx = {"test": [0]}
merged_profile = profile1 + profile2
self.assertEqual(3, merged_profile._profile[
merged_profile._col_name_to_idx["test"][0]])
self.assertIsNone(merged_profile.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", merged_profile.file_type)
self.assertEqual(2, merged_profile.row_has_null_count)
self.assertEqual(2, merged_profile.row_is_null_count)
self.assertEqual(7, merged_profile.total_samples)
self.assertEqual(5, len(merged_profile.hashed_row_dict))
self.assertDictEqual({'row_stats': 2}, merged_profile.times)
# test success if drawn from multiple files
profile2.encoding = 'test'
profile2.file_type = 'test'
merged_profile = profile1 + profile2
self.assertEqual('multiple files', merged_profile.encoding)
self.assertEqual('multiple files', merged_profile.file_type)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._get_correlation')
def test_stream_profilers(self, *mocks):
mocks[0].return_value = None
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None],
[None, 5.0],
[None, 5.0],
[None, None],
['test3', 7.0]])
# check prior to update
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data[:3])
self.assertEqual(1, profiler.row_has_null_count)
self.assertEqual(0, profiler.row_is_null_count)
self.assertEqual(3, profiler.total_samples)
self.assertEqual(2, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1}, profiler.times)
# check after update
with test_utils.mock_timeit():
profiler.update_profile(data[3:])
self.assertIsNone(profiler.encoding)
self.assertEqual(
"<class 'pandas.core.frame.DataFrame'>", profiler.file_type)
self.assertEqual(5, profiler.row_has_null_count)
self.assertEqual(2, profiler.row_is_null_count)
self.assertEqual(8, profiler.total_samples)
self.assertEqual(5, len(profiler.hashed_row_dict))
self.assertIsNone(profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2}, profiler.times)
def test_correct_unique_row_ratio_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(1.0, self.trained_schema._get_unique_row_ratio())
def test_correct_rows_ingested(self):
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_null_row_ratio_test(self):
self.assertEqual(2999, self.trained_schema.row_has_null_count)
self.assertEqual(1.0, self.trained_schema._get_row_has_null_ratio())
self.assertEqual(0, self.trained_schema.row_is_null_count)
self.assertEqual(0, self.trained_schema._get_row_is_null_ratio())
self.assertEqual(2999, self.trained_schema.total_samples)
def test_correct_duplicate_row_count_test(self):
self.assertEqual(2999, len(self.trained_schema.hashed_row_dict))
self.assertEqual(2999, self.trained_schema.total_samples)
self.assertEqual(0.0, self.trained_schema._get_duplicate_row_count())
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_correlation(self, *mock):
# Use the following formula to obtain the pairwise correlation
# sum((x - np.mean(x))*(y-np.mean(y))) /
        # np.sqrt(sum((x - np.mean(x))**2))/np.sqrt(sum((y - np.mean(y))**2))
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# data with a sole numeric column
data = pd.DataFrame([1.0, 8.0, 1.0, -2.0, 5.0])
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1.0]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1}, profiler.times)
        # data with one column containing non-numeric (null) values
data = pd.DataFrame([1.0, None, 1.0, None, 5.0])
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with two columns, but one is numerical
data = pd.DataFrame([
['test1', 1.0],
['test2', None],
['test1', 1.0],
[None, None]])
profiler = dp.StructuredProfiler(data, options=profile_options)
# Even the correlation with itself is NaN because the variance is zero
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, np.nan, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, np.nan, np.nan]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, -0.28527657, 0.18626508],
[-0.28527657, 1, -0.52996792],
[0.18626508, -0.52996792, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with multiple numerical columns, with nan values in only one
# column
data = pd.DataFrame({'a': [np.nan, np.nan, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[1, 0.03673504, 0.22844891],
[0.03673504, 1, -0.49072329],
[0.22844891, -0.49072329, 1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with only one numerical columns without nan values
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([[1]])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with no numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3']})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan],
[np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# data with only one numeric column
# data with no numeric columns
data = pd.DataFrame({'a': ['hi', 'hi2', 'hi3'],
'b': ['test1', 'test2', 'test3'],
'c': [1, 2, 3]})
profiler = dp.StructuredProfiler(data, options=profile_options)
expected_corr_mat = np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows
data = pd.DataFrame({'a': [None, 2, 1, np.nan, 5, np.nan, 4, 10, 7, np.nan],
'b': [np.nan, 11, 1, 'nan', 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, np.nan, 6, 8, 1, None]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, np.nan, 2],
'b': [10, 11, 1, 4, 2, 5, np.nan, 3, np.nan, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, np.nan, 2]})
profiler = dp.StructuredProfiler(data, options=profile_options)
# correlation between [*38/7*, *38/7*, 1, 7, 5, 9, 4, 10, 2],
# [10, 11, 1, 4, 2, 5, *11/2*, 3, 8],
# [1, 5, 3, 5, *4*, 2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.03283837, 0.40038038],
[-0.03283837, 1, -0.30346637],
[0.40038038, -0.30346637, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_correlation(self, *mocks):
        # Use the following formula to obtain the pairwise correlation
        # sum((x - np.mean(x))*(y-np.mean(y))) /
        # np.sqrt(sum((x - np.mean(x))**2))/np.sqrt(sum((y - np.mean(y))**2))
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# merge between two existing correlations
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(data1, options=profile_options)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.49072329, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2},
merged_profile.times)
# merge between an existing corr and None correlation (without data)
with test_utils.mock_timeit():
profile1 = dp.StructuredProfiler(None, options=profile_options)
profile2 = dp.StructuredProfiler(data, options=profile_options)
        # TODO: remove the mock below when merge profile is updated
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
merged_profile = profile1 + profile2
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.49072329],
[0.26594894270403086, -0.4907239, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
self.assertDictEqual({'row_stats': 1, 'correlation': 1},
merged_profile.times)
# Merge between existing data and empty data that still has samples
data = pd.DataFrame({'a': [1, 2, 4, np.nan, None, np.nan],
'b': [5, 7, 1, np.nan, np.nan, 'nan']})
data1 = data[:3]
data2 = data[3:]
profile1 = dp.StructuredProfiler(data1, options=profile_options)
expected_corr_mat = np.array([
[1, -0.78571429],
[-0.78571429, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profile1.correlation_matrix)
profile2 = dp.StructuredProfiler(data2, options=profile_options)
merged_profile = profile1 + profile2
np.testing.assert_array_almost_equal(expected_corr_mat,
merged_profile.correlation_matrix)
def test_correlation_update(self):
profile_options = dp.ProfilerOptions()
profile_options.set({"correlation.is_enabled": True})
# Test with all numeric columns
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
with test_utils.mock_timeit():
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, 0.26594894270403086],
[-0.26559388521279237, 1.0, -0.4907239],
[0.26594894270403086, -0.4907239, 1.0]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
self.assertDictEqual({'row_stats': 2, 'correlation': 2}, profiler.times)
# Test when there's a non-numeric column
data = pd.DataFrame({'a': [3, 2, 1, 7, 5, 9, 4, 10, 7, 2],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
[1.0, -0.26559388521279237, np.nan],
[-0.26559388521279237, 1.0, np.nan],
[np.nan, np.nan, np.nan]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with multiple numerical and non-numeric columns, with nan values in only one column
# NaNs imputed to (9+4+10)/3
data = pd.DataFrame({'a': [7, 2, 1, 7, 5, 9, 4, 10, np.nan, np.nan],
'b': [10, 11, 1, 4, 2, 5, 6, 3, 9, 8],
'c': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
'd': [1, 5, 3, 5, 7, 2, 6, 8, 1, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
expected_corr_mat = np.array([
            [1, 0.04721482, np.nan, -0.09383408],
            [0.04721482, 1, np.nan, -0.49072329],
            [np.nan, np.nan, np.nan, np.nan],
            [-0.09383408, -0.49072329, np.nan, 1]
        ])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows, all null rows are dropped
data = pd.DataFrame({'a': [np.nan, 2, 1, None, 5, np.nan, 4, 10, 7, 'NaN'],
'b': [np.nan, 11, 1, np.nan, 2, np.nan, 6, 3, 9, np.nan],
'c': [np.nan, 5, 3, np.nan, 7, None, 6, 8, 1, np.nan]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [2, 1, 5, 4, 10, 7],
# [11, 1, 2, 6, 3, 9],
# [5, 3, 7, 6, 8, 1]
expected_corr_mat = np.array([
[1, -0.06987956, 0.32423975],
[-0.06987956, 1, -0.3613099],
[0.32423975, -0.3613099, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
# Data with null rows and some imputed values
data = pd.DataFrame({'a': [None, np.nan, 1, 7, 5, 9, 4, 10, 'nan', 2],
'b': [10, 11, 1, 4, 2, 5, 'NaN', 3, None, 8],
'c': [1, 5, 3, 5, np.nan, 2, 6, 8, None, 2]})
data1 = data[:5]
data2 = data[5:]
profiler = dp.StructuredProfiler(data1, options=profile_options)
profiler.update_profile(data2)
# correlation between [*13/3*, *13/3*, 1, 7, 5]
# [10, 11, 1, 4, 2]
# [1, 5, 3, 5, *7/2*]
# then updated with correlation (9th row dropped) between
# [9, 4, 10, 2],
# [5, *16/3*, 3, 8],
# [2, 6, 8, 2]
expected_corr_mat = np.array([
[1, -0.16079606, 0.43658332],
[-0.16079606, 1, -0.2801748],
[0.43658332, -0.2801748, 1]
])
np.testing.assert_array_almost_equal(expected_corr_mat,
profiler.correlation_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_chi2(self, *mocks):
# Empty
data = pd.DataFrame([])
profiler = dp.StructuredProfiler(data)
self.assertIsNone(profiler.chi2_matrix)
# Single column
data = pd.DataFrame({'a': ["y", "y", "n", "n", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([1])
self.assertEqual(expected_mat, profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
profiler = dp.StructuredProfiler(data)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_merge_chi2(self, *mocks):
# Merge empty data
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
profiler1 = dp.StructuredProfiler(None)
profiler2 = dp.StructuredProfiler(data)
with mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._add_error_checks'):
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler1 = dp.StructuredProfiler(data1)
profiler2 = dp.StructuredProfiler(data2)
profiler3 = profiler1 + profiler2
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler3.chi2_matrix)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler',
spec=StructuredDataLabeler)
def test_update_chi2(self, *mocks):
# Update with empty data
data1 = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data2 = pd.DataFrame({'a': [],
'b': [],
'c': []})
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "maybe", "y", "y", "n", "n", "maybe"],
'c': ["n", "maybe", "n", "n", "n", "y", "y"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.309924, 0.404638],
[0.309924, 1, 0.548812],
[0.404638, 0.548812, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# All different categories
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["a", "maybe", "a", "a", "b", "b", "maybe"],
'c': ["d", "d", "g", "g", "g", "t", "t"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 0.007295, 0.007295],
[0.007295, 1, 0.015609],
[0.007295, 0.015609, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
# Identical columns
data = pd.DataFrame({'a': ["y", "y", "y", "y", "n", "n", "n"],
'b': ["y", "y", "y", "y", "n", "n", "n"],
'c': ["y", "y", "y", "y", "n", "n", "n"]})
data1 = data[:4]
data2 = data[4:]
profiler = dp.StructuredProfiler(data1)
profiler.update_profile(data2)
expected_mat = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
])
np.testing.assert_array_almost_equal(expected_mat,
profiler.chi2_matrix)
def test_correct_datatime_schema_test(self):
profile_idx = self.trained_schema._col_name_to_idx["datetime"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = \
profile.profiles['data_type_profile']._profiles["datetime"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(2, profile.null_count)
six.assertCountEqual(self, ['nan'], profile.null_types)
self.assertEqual(['%m/%d/%y %H:%M'], col_schema_info['date_formats'])
def test_correct_integer_column_detection_src(self):
profile_idx = self.trained_schema._col_name_to_idx["src"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(3, profile.null_count)
def test_correct_integer_column_detection_int_col(self):
profile_idx = self.trained_schema._col_name_to_idx["int_col"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(0, profile.null_count)
def test_correct_integer_column_detection_port(self):
profile_idx = self.trained_schema._col_name_to_idx["srcport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_correct_integer_column_detection_destport(self):
profile_idx = self.trained_schema._col_name_to_idx["destport"][0]
profile = self.trained_schema.profile[profile_idx]
col_schema_info = profile.profiles['data_type_profile']._profiles["int"]
self.assertEqual(2999, profile.sample_size)
self.assertEqual(col_schema_info.sample_size,
col_schema_info.match_count)
self.assertEqual(197, profile.null_count)
def test_report(self):
report = self.trained_schema.report()
self.assertListEqual(list(report.keys()), [
'global_stats', 'data_stats'])
self.assertListEqual(
list(report['global_stats']),
[
"samples_used", "column_count", "row_count",
"row_has_null_ratio", 'row_is_null_ratio',
"unique_row_ratio", "duplicate_row_count", "file_type",
"encoding", "correlation_matrix", "chi2_matrix", "profile_schema", "times"
]
)
flat_report = self.trained_schema.report(
report_options={"output_format": "flat"})
self.assertEqual(test_utils.get_depth(flat_report), 1)
with mock.patch('dataprofiler.profilers.helpers.report_helpers'
'._prepare_report') as pr_mock:
self.trained_schema.report(
report_options={"output_format": 'pretty'})
# Once for global_stats, once for each of 16 columns
self.assertEqual(pr_mock.call_count, 17)
def test_report_schema_and_data_stats_match_order(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report()
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
expected_schema = {"a": [0, 2], "b": [1, 3], "c": [4], "d": [5]}
self.assertDictEqual(expected_schema, schema)
# Check that the column order in the report matches the column order
        # in the schema (and in the data)
for name in schema:
for idx in schema[name]:
# Use min of column to validate column order amongst duplicates
col_min = data.iloc[0, idx]
self.assertEqual(name, data_stats[idx]["column_name"])
self.assertEqual(col_min, data_stats[idx]["statistics"]["min"])
def test_pretty_report_doesnt_cast_schema(self):
report = self.trained_schema.report(
report_options={"output_format": "pretty"})
# Want to ensure the values of this dict are of type list[int]
# Since pretty "prettifies" lists into strings with ... to shorten
expected_schema = {"datetime": [0], "host": [1], "src": [2],
"proto": [3], "type": [4], "srcport": [5],
"destport": [6], "srcip": [7], "locale": [8],
"localeabbr": [9], "postalcode": [10],
"latitude": [11], "longitude": [12], "owner": [13],
"comment": [14], "int_col": [15]}
self.assertDictEqual(expected_schema,
report["global_stats"]["profile_schema"])
def test_omit_keys_with_duplicate_cols(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={
"omit_keys": ["data_stats.a.statistics.min",
"data_stats.d.statistics.max",
"data_stats.*.statistics.null_types_index"]})
# Correctness of schema asserted in prior test
schema = report["global_stats"]["profile_schema"]
data_stats = report["data_stats"]
for idx in range(len(report["data_stats"])):
# Assert that min is absent from a's data_stats and not the others
if idx in schema["a"]:
self.assertNotIn("min", data_stats[idx]["statistics"])
else:
self.assertIn("min", report["data_stats"][idx]["statistics"])
# Assert that max is absent from d's data_stats and not the others
if idx in schema["d"]:
self.assertNotIn("max", report["data_stats"][idx]["statistics"])
else:
self.assertIn("max", report["data_stats"][idx]["statistics"])
# Assert that null_types_index not present in any
self.assertNotIn("null_types_index",
report["data_stats"][idx]["statistics"])
def test_omit_cols_preserves_schema(self):
data = pd.DataFrame([[1, 2, 3, 4, 5, 6],
[10, 20, 30, 40, 50, 60]],
columns=["a", "b", "a", "b", "c", "d"])
omit_cols = ["a", "d"]
omit_idxs = [0, 2, 5]
omit_keys = [f"data_stats.{col}" for col in omit_cols]
profiler_options = ProfilerOptions()
profiler_options.set({'data_labeler.is_enabled': False})
profiler = dp.StructuredProfiler(data=data, options=profiler_options)
report = profiler.report(report_options={"omit_keys": omit_keys})
for idx in range(len(report["data_stats"])):
if idx in omit_idxs:
self.assertIsNone(report["data_stats"][idx])
else:
self.assertIsNotNone(report["data_stats"][idx])
# This will keep the data_stats key but remove all columns
report = profiler.report(report_options={"omit_keys": ["data_stats.*"]})
for col_report in report["data_stats"]:
self.assertIsNone(col_report)
def test_report_quantiles(self):
report_none = self.trained_schema.report(
report_options={"num_quantile_groups": None})
report = self.trained_schema.report()
self.assertEqual(report_none, report)
for col in report["data_stats"]:
if col["column_name"] == "int_col":
report_quantiles = col["statistics"]["quantiles"]
break
self.assertEqual(len(report_quantiles), 3)
report2 = self.trained_schema.report(
report_options={"num_quantile_groups": 1000})
for col in report2["data_stats"]:
if col["column_name"] == "int_col":
report2_1000_quant = col["statistics"]["quantiles"]
break
self.assertEqual(len(report2_1000_quant), 999)
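        # the 3 quantiles reported by default are the quartile boundaries (25th/50th/75th percentiles), so they
        # should match entries 249, 499 and 749 of the 999 quantiles produced when 1000 groups are requested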
self.assertEqual(report_quantiles, {
0: report2_1000_quant[249],
1: report2_1000_quant[499],
2: report2_1000_quant[749],
})
def test_report_omit_keys(self):
# Omit both report keys manually
no_report_keys = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats']})
self.assertCountEqual({}, no_report_keys)
# Omit just data_stats
no_data_stats = self.trained_schema.report(
report_options={"omit_keys": ['data_stats']})
self.assertCountEqual({"global_stats"}, no_data_stats)
# Omit a global stat
no_samples_used = self.trained_schema.report(
report_options={"omit_keys": ['global_stats.samples_used']})
self.assertNotIn("samples_used", no_samples_used["global_stats"])
# Omit all keys
nothing = self.trained_schema.report(
report_options={"omit_keys": ['*']})
self.assertCountEqual({}, nothing)
# Omit every data_stats column
empty_data_stats_cols = self.trained_schema.report(
report_options={"omit_keys": ['global_stats', 'data_stats.*']})
# data_stats key still present, but all columns are None
self.assertCountEqual({"data_stats"}, empty_data_stats_cols)
self.assertTrue(all([rep is None
for rep in empty_data_stats_cols["data_stats"]]))
# Omit specific data_stats column
no_datetime = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.datetime']})
self.assertNotIn("datetime", no_datetime["data_stats"])
# Omit a statistic from each column
no_sum = self.trained_schema.report(
report_options={"omit_keys": ['data_stats.*.statistics.sum']})
self.assertTrue(all(["sum" not in rep["statistics"]
for rep in no_sum["data_stats"]]))
def test_report_compact(self):
report = self.trained_schema.report(
report_options={ "output_format": "pretty" })
omit_keys = [
"data_stats.*.statistics.times",
"data_stats.*.statistics.avg_predictions",
"data_stats.*.statistics.data_label_representation",
"data_stats.*.statistics.null_types_index",
"data_stats.*.statistics.histogram"
]
report = _prepare_report(report, 'pretty', omit_keys)
report_compact = self.trained_schema.report(
report_options={"output_format": "compact"})
self.assertEqual(report, report_compact)
def test_profile_key_name_without_space(self):
def recursive_test_helper(report, prev_key=None):
for key in report:
# do not test keys in 'data_stats' as they contain column names
                # nor for 'avg_predictions' and 'data_label_representation'
# as they contain label names
# same for 'null_types_index'
if prev_key not in ['data_stats', 'avg_predictions',
'data_label_representation',
'null_types_index', 'categorical_count']:
# key names should contain only alphanumeric letters or '_'
self.assertIsNotNone(re.match('^[a-zA-Z0-9_]+$', str(key)))
if isinstance(report[key], dict):
recursive_test_helper(report[key], key)
_report = self.trained_schema.report()
recursive_test_helper(_report)
def test_data_label_assigned(self):
# only use 5 samples
trained_schema = dp.StructuredProfiler(self.aws_dataset, samples_per_update=5)
report = trained_schema.report()
has_non_null_column = False
for i in range(len(report['data_stats'])):
# only test non-null columns
if report['data_stats'][i]['data_type'] is not None:
self.assertIsNotNone(report['data_stats'][i]['data_label'])
has_non_null_column = True
if not has_non_null_column:
self.fail(
"Dataset tested did not have a non-null column and therefore "
"could not validate the test.")
def test_text_data_raises_error(self):
text_file_path = os.path.join(
test_root_path, 'data', 'txt/sentence-10x.txt'
)
with self.assertRaisesRegex(TypeError, 'Cannot provide TextData object'
' to StructuredProfiler'):
profiler = dp.StructuredProfiler(dp.Data(text_file_path))
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_chi2')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_row_statistics')
@mock.patch('dataprofiler.profilers.profile_builder.StructuredColProfiler')
def test_sample_size_warning_in_the_profiler(self, *mocks):
# structure data profile mock
sdp_mock = mock.Mock()
sdp_mock.clean_data_and_get_base_stats.return_value = (None, None)
mocks[0].return_value = sdp_mock
data = pd.DataFrame([1, None, 3, 4, 5, None])
with self.assertWarnsRegex(UserWarning,
"The data will be profiled with a sample "
"size of 3. All statistics will be based on "
"this subsample and not the whole dataset."):
profile1 = dp.StructuredProfiler(data, samples_per_update=3)
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_min_col_samples_used(self, *mocks):
# No cols sampled since no cols to sample
empty_df = pd.DataFrame([])
empty_profile = dp.StructuredProfiler(empty_df)
self.assertEqual(0, empty_profile._min_col_samples_used)
# Every column fully sampled
full_df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
full_profile = dp.StructuredProfiler(full_df)
self.assertEqual(3, full_profile._min_col_samples_used)
# First col sampled only twice, so that is min
sparse_df = pd.DataFrame([[1, None, None],
[1, 1, None],
[1, None, 1]])
sparse_profile = dp.StructuredProfiler(sparse_df, min_true_samples=2,
samples_per_update=1)
self.assertEqual(2, sparse_profile._min_col_samples_used)
@mock.patch('dataprofiler.profilers.profile_builder.StructuredProfiler.'
'_update_profile_from_chunk')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
def test_min_true_samples(self, *mocks):
empty_df = pd.DataFrame([])
# Test invalid input
msg = "`min_true_samples` must be an integer or `None`."
with self.assertRaisesRegex(ValueError, msg):
profile = dp.StructuredProfiler(empty_df, min_true_samples="Bloop")
# Test invalid input given to update_profile
profile = dp.StructuredProfiler(empty_df)
with self.assertRaisesRegex(ValueError, msg):
profile.update_profile(empty_df, min_true_samples="Bloop")
# Test None input (equivalent to zero)
profile = dp.StructuredProfiler(empty_df, min_true_samples=None)
self.assertEqual(None, profile._min_true_samples)
# Test valid input
profile = dp.StructuredProfiler(empty_df, min_true_samples=10)
self.assertEqual(10, profile._min_true_samples)
def test_save_and_load(self):
datapth = "dataprofiler/tests/data/"
test_files = ["csv/guns.csv", "csv/iris.csv"]
for test_file in test_files:
# Create Data and StructuredProfiler objects
data = dp.Data(os.path.join(datapth, test_file))
options = ProfilerOptions()
options.set({"correlation.is_enabled": True})
save_profile = dp.StructuredProfiler(data)
# store the expected data_labeler
data_labeler = save_profile.options.data_labeler.data_labeler_object
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler', return_value=data_labeler):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# validate loaded profile has same data labeler class
self.assertIsInstance(
load_profile.options.data_labeler.data_labeler_object,
data_labeler.__class__)
                # only check the first column
# get first column
first_column_profile = load_profile.profile[0]
self.assertIsInstance(
first_column_profile.profiles['data_label_profile']
._profiles['data_labeler'].data_labeler,
data_labeler.__class__)
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
np.testing.assert_equal(save_report, load_report)
def test_save_and_load_no_labeler(self):
        # Create data and StructuredProfiler objects
data = pd.DataFrame([1, 2, 3], columns=["a"])
profile_options = dp.ProfilerOptions()
profile_options.set({"data_labeler.is_enabled": False})
save_profile = dp.StructuredProfiler(data, options=profile_options)
# Save and Load profile with Mock IO
with mock.patch('builtins.open') as m:
mock_file = setup_save_mock_open(m)
save_profile.save()
mock_file.seek(0)
with mock.patch('dataprofiler.profilers.profile_builder.'
'DataLabeler'):
load_profile = dp.StructuredProfiler.load("mock.pkl")
# Check that reports are equivalent
save_report = test_utils.clean_report(save_profile.report())
load_report = test_utils.clean_report(load_profile.report())
self.assertDictEqual(save_report, load_report)
# validate both are still usable after
save_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
load_profile.update_profile(pd.DataFrame({"a": [4, 5]}))
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnPrimitiveTypeProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnStatsProfileCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'ColumnDataLabelerCompiler')
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
@mock.patch('dataprofiler.profilers.profile_builder.'
'StructuredProfiler._update_correlation')
def test_string_index_doesnt_cause_error(self, *mocks):
dp.StructuredProfiler( | pd.DataFrame([[1, 2, 3]], index=["hello"]) | pandas.DataFrame |
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import os
import argparse
from sklearn import preprocessing
from matplotlib.ticker import EngFormatter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f1', '--logFolder1', help='Log folder 1', type=str)
parser.add_argument('-f2', '--logFolder2', help='Log folder 2', type=str)
parser.add_argument('-s', '--saveFolder', help='save folder', type=str)
args = parser.parse_args()
path_base = args.logFolder1 # "logs/train_1M_widowx_reach-v3/"
path_base2 = args.logFolder2 # "logs/train_1M_widowx_reach-v3/"
save_dir = args.saveFolder #"experiment_reports/1M_widowx_reach-v3/"
os.makedirs(save_dir, exist_ok=True)
### GET DATA ###
df1 = pd.read_csv(path_base+"a2c/all_rewards_smooth.csv")
df2 = pd.read_csv(path_base+"acktr/all_rewards_smooth.csv")
df3 = pd.read_csv(path_base+"ddpg/all_rewards_smooth.csv")
df4 = pd.read_csv(path_base+"ppo2/all_rewards_smooth.csv")
df5 = pd.read_csv(path_base+"sac/all_rewards_smooth.csv")
df6 = pd.read_csv(path_base+"td3/all_rewards_smooth.csv")
df7 = pd.read_csv(path_base+"trpo/all_rewards_smooth.csv")
df8 = pd.read_csv(path_base+"her_sac/all_rewards_smooth.csv")
df9 = pd.read_csv(path_base+"her_td3/all_rewards_smooth.csv")
env2_df1 = pd.read_csv(path_base2+"a2c/all_rewards_smooth.csv")
env2_df2 = pd.read_csv(path_base2+"acktr/all_rewards_smooth.csv")
env2_df3 = pd.read_csv(path_base2+"ddpg/all_rewards_smooth.csv")
env2_df4 = pd.read_csv(path_base2+"ppo2/all_rewards_smooth.csv")
env2_df5 = pd.read_csv(path_base2+"sac/all_rewards_smooth.csv")
env2_df6 = pd.read_csv(path_base2+"td3/all_rewards_smooth.csv")
env2_df7 = pd.read_csv(path_base2+"trpo/all_rewards_smooth.csv")
env2_df8 = pd.read_csv(path_base2+"her_sac/all_rewards_smooth.csv")
env2_df9 = pd.read_csv(path_base2+"her_td3/all_rewards_smooth.csv")
df_list = [
df1,
df2,
df3,
df4,
df5,
df6,
df7,
df8,
df9
]
df_list2 = [
env2_df1,
env2_df2,
env2_df3,
env2_df4,
env2_df5,
env2_df6,
env2_df7,
env2_df8,
env2_df9
]
df_label = [
"A2C",
"ACKTR",
"DDPG",
"PPO2",
"SAC",
"TD3",
"TRPO",
"SAC + HER",
"TD3 + HER"
]
ff1 = pd.read_csv(path_base+"/a2c/results_seed_exp.csv")
ff2 = pd.read_csv(path_base+"/acktr/results_seed_exp.csv")
ff3 = pd.read_csv(path_base+"/ddpg/results_seed_exp.csv")
ff4 = pd.read_csv(path_base+"/ppo2/results_seed_exp.csv")
ff5 = pd.read_csv(path_base+"/sac/results_seed_exp.csv")
ff6 = pd.read_csv(path_base+"/td3/results_seed_exp.csv")
ff7 = pd.read_csv(path_base+"/trpo/results_seed_exp.csv")
ff8 = pd.read_csv(path_base+"/her_sac/results_seed_exp.csv")
ff9 = pd.read_csv(path_base+"/her_td3/results_seed_exp.csv")
env2_ff1 = pd.read_csv(path_base2+"/a2c/results_seed_exp.csv")
env2_ff2 = pd.read_csv(path_base2+"/acktr/results_seed_exp.csv")
env2_ff3 = | pd.read_csv(path_base2+"/ddpg/results_seed_exp.csv") | pandas.read_csv |
import pytest
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.svm import LinearSVC, LinearSVR
from foreshadow.console import generate_model
from foreshadow.estimators import AutoEstimator
from foreshadow.utils import EstimatorFamily, ProblemType
from foreshadow.utils.testing import get_file_path
@pytest.mark.skip("level 2 is not implemented.")
def test_console_generate_level2():
pass
def test_console_get_method_default_regression():
import pandas as pd
from foreshadow.console import get_method
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
boston = load_boston()
X_df = pd.DataFrame(boston.data, columns=boston.feature_names)
y_df = pd.DataFrame(boston.target, columns=["target"])
X_train, X_test, y_train, y_test = train_test_split(
X_df, y_df, test_size=0.2
)
result = get_method(
None,
y_train,
family=EstimatorFamily.LINEAR,
problem_type=ProblemType.REGRESSION,
)
assert isinstance(result, LinearRegression)
def test_console_get_method_default_classification():
import pandas as pd
from foreshadow.console import get_method
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
X_df = | pd.DataFrame(cancer.data, columns=cancer.feature_names) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: Eastmoney (东方财富网) Data Center - New Stock Data - IPO subscription returns
Eastmoney Data Center - New Stock Data - IPO subscription returns (打新收益率)
http://data.eastmoney.com/xg/xg/dxsyl.html
Eastmoney Data Center - New Stock Data - IPO subscription and allotment query (新股申购与中签查询)
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def _get_page_num_dxsyl() -> int:
"""
    Eastmoney Data Center - New Stock Data - IPO subscription returns - total number of pages
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: total number of pages
:rtype: int
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": '1',
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
r = requests.get(url, params=params)
data_text = r.text
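    # the endpoint returns a JSONP-style payload wrapped in parentheses (see the "js" template above),
    # so strip the outer "(" and ")" before decoding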
data_json = demjson.decode(data_text[1:-1])
total_page = data_json["pages"]
return total_page
def stock_dxsyl_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - New Stock Data - IPO subscription returns
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: IPO subscription return data for the specified market
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl()
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": str(page),
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json["data"]])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"-",
]
big_df = big_df[[
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
]]
big_df["发行价"] = pd.to_numeric(big_df["发行价"], errors='coerce')
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["网上-发行中签率"] = pd.to_numeric(big_df["网上-发行中签率"])
big_df["网上-有效申购股数"] = pd.to_numeric(big_df["网上-有效申购股数"])
big_df["网上-有效申购户数"] = pd.to_numeric(big_df["网上-有效申购户数"])
big_df["网上-超额认购倍数"] = pd.to_numeric(big_df["网上-超额认购倍数"])
big_df["网下-配售中签率"] = pd.to_numeric(big_df["网下-配售中签率"])
big_df["网下-有效申购股数"] = pd.to_numeric(big_df["网下-有效申购股数"])
big_df["网下-有效申购户数"] = pd.to_numeric(big_df["网下-有效申购户数"])
big_df["网下-配售认购倍数"] = pd.to_numeric(big_df["网下-配售认购倍数"])
big_df["总发行数量"] = pd.to_numeric(big_df["总发行数量"])
big_df["开盘溢价"] = pd.to_numeric(big_df["开盘溢价"])
big_df["首日涨幅"] = pd.to_numeric(big_df["首日涨幅"])
big_df["打新收益"] = pd.to_numeric(big_df["打新收益"])
return big_df
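# Illustrative usage sketch (assumption: run ad hoc, since the call performs live HTTP requests to Eastmoney):
# stock_dxsyl_em_df = stock_dxsyl_em()
# print(stock_dxsyl_em_df.head())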
def stock_xgsglb_em(symbol: str = "京市A股") -> pd.DataFrame:
"""
    IPO subscription and allotment query (新股申购与中签查询)
http://data.eastmoney.com/xg/xg/default_2.html
:param symbol: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
:type symbol: str
    :return: IPO subscription and allotment data
:rtype: pandas.DataFrame
"""
market_map = {
"全部股票": """(APPLY_DATE>'2010-01-01')""",
"沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
"科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
"深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
"创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
}
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
if symbol == "京市A股":
params = {
'sortColumns': 'APPLY_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'columns': 'ALL',
'reportName': 'RPT_NEEQ_ISSUEINFO_LIST',
'quoteColumns': 'f14~01~SECURITY_CODE~SECURITY_NAME_ABBR',
'source': 'NEEQSELECT',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, 1+int(total_page)), leave=False):
params.update({
'pageNumber': page
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
'序号',
'-',
'代码',
'-',
'简称',
'申购代码',
'发行总数',
'-',
'发行价格',
'发行市盈率',
'申购日',
'发行结果公告日',
'上市日',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'网上申购缴款日',
'网上申购退款日',
'-',
'网上获配比例',
'最新价',
'首日收盘价',
'网下有效申购倍数',
'每百股获利',
'-',
'-',
'-',
'-',
'-',
'-',
]
big_df = big_df[[
'序号',
'代码',
'简称',
'申购代码',
'发行总数',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'发行价格',
'最新价',
'首日收盘价',
'申购日',
'网上申购缴款日',
'网上申购退款日',
'上市日',
'发行结果公告日',
'发行市盈率',
'网上获配比例',
'网下有效申购倍数',
'每百股获利',
]]
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行数量'] = pd.to_numeric(big_df['网上发行数量'])
big_df['顶格申购所需资金'] = pd.to_numeric(big_df['顶格申购所需资金'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
big_df['网上获配比例'] = pd.to_numeric(big_df['网上获配比例'])
big_df['网下有效申购倍数'] = pd.to_numeric(big_df['网下有效申购倍数'])
big_df['每百股获利'] = pd.to_numeric(big_df['每百股获利'])
big_df['申购日'] = pd.to_datetime(big_df['申购日']).dt.date
big_df['网上申购缴款日'] = pd.to_datetime(big_df['网上申购缴款日']).dt.date
big_df['网上申购退款日'] = pd.to_datetime(big_df['网上申购退款日']).dt.date
big_df['上市日'] = pd.to_datetime(big_df['上市日']).dt.date
big_df['发行结果公告日'] = pd.to_datetime(big_df['发行结果公告日']).dt.date
return big_df
else:
params = {
'sortColumns': 'APPLY_DATE,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPTA_APP_IPOAPPLY',
'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
'filter': market_map[symbol],
'source': 'WEB',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"股票代码",
"股票简称",
"_",
"申购代码",
"_",
"_",
"_",
"发行总数",
"网上发行",
"_",
"顶格申购需配市值",
"_",
"申购上限",
"_",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"中签率",
"询价累计报价倍数",
"_",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"行业市盈率",
"_",
"_",
"_",
]
big_df = big_df[
[
"股票代码",
"股票简称",
"申购代码",
"发行总数",
"网上发行",
"顶格申购需配市值",
"申购上限",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"行业市盈率",
"中签率",
"询价累计报价倍数",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
]
]
big_df['申购日期'] = pd.to_datetime(big_df['申购日期']).dt.date
big_df['中签号公布日'] = pd.to_datetime(big_df['中签号公布日']).dt.date
big_df['中签缴款日期'] = pd.to_dateti | me(big_df['中签缴款日期']) | pandas.to_datetime |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import abc
import sys
import copy
import time
import datetime
import importlib
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
import fire
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
from loguru import logger
from yahooquery import Ticker
from dateutil.tz import tzlocal
CUR_DIR = Path(__file__).resolve().parent
sys.path.append(str(CUR_DIR.parent.parent))
from data_collector.utils import get_calendar_list, get_hs_stock_symbols, get_us_stock_symbols
INDEX_BENCH_URL = "http://push2his.eastmoney.com/api/qt/stock/kline/get?secid=1.{index_code}&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58&klt=101&fqt=0&beg={begin}&end={end}"
REGION_CN = "CN"
REGION_US = "US"
class YahooCollector:
START_DATETIME = pd.Timestamp("2000-01-01")
HIGH_FREQ_START_DATETIME = pd.Timestamp(datetime.datetime.now() - pd.Timedelta(days=5 * 5))
END_DATETIME = pd.Timestamp(datetime.datetime.now() + | pd.Timedelta(days=1) | pandas.Timedelta |
import numpy as np
import pytest
import sklearn
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
from distutils.version import LooseVersion
from dirty_cat import SuperVectorizer
from dirty_cat import GapEncoder
def check_same_transformers(expected_transformers: dict, actual_transformers: list):
# Construct the dict from the actual transformers
actual_transformers_dict = dict([(name, cols) for name, trans, cols in actual_transformers])
assert actual_transformers_dict == expected_transformers
def _get_clean_dataframe():
"""
Creates a simple DataFrame with various types of data,
and without missing values.
"""
return pd.DataFrame({
'int': pd.Series([15, 56, 63, 12, 44], dtype='int'),
'float': | pd.Series([5.2, 2.4, 6.2, 10.45, 9.], dtype='float') | pandas.Series |
"""
Prepare training and testing datasets as CSV dictionaries 2.0 (Further modification required for GBM)
Created on 04/26/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in(slide, label, root_dir, sldnum):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '_{}.png'.format(str(sldnum)) in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = int(re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0])
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
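        # a level-2 tile covers a 2x2 block of finer tiles, so round x/y down to even values before joining level 2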
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
# Get all svs images with its label as one file; level is the tile resolution level
def big_image_sum(pmd, path='../tiles/', dict_file='../tcia_pathology_slides.tsv',
ref_file='../gbm_all_subtype_collections.2019-10-13.tsv'):
refdict = {'low': 0, 'high': 1, False: 0, True: 1, 'normal': 0, 'short': 1, 'long': 2}
dct = pd.read_csv(dict_file, sep='\t', header=0)
# dct = dct.loc[dct['used_in_proteome'] == True]
ref = pd.read_csv(ref_file, sep='\t', header=0)
ref = ref.dropna(subset=[pmd])
ref[pmd] = ref[pmd].replace(refdict)
big_images = []
if pmd == 'telomere':
normalimg = intersection(ref.loc[ref[pmd] == 0]['case'].tolist(), dct['case_id'].tolist())
normalsld = dct[dct['case_id'].isin(normalimg)]['slide_id'].tolist()
shortimg = intersection(ref.loc[ref[pmd] == 1]['case'].tolist(), dct['case_id'].tolist())
shortsld = dct[dct['case_id'].isin(shortimg)]['slide_id'].tolist()
longimg = intersection(ref.loc[ref[pmd] == 2]['case'].tolist(), dct['case_id'].tolist())
longsld = dct[dct['case_id'].isin(longimg)]['slide_id'].tolist()
for i in normalsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 0, path + "{}/".format(pctnum), sldnum])
for i in shortsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 1, path + "{}/".format(pctnum), sldnum])
for i in longsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 2, path + "{}/".format(pctnum), sldnum])
else:
negimg = intersection(ref.loc[ref[pmd] == 0]['case'].tolist(), dct['case_id'].tolist())
negsld = dct[dct['case_id'].isin(negimg)]['slide_id'].tolist()
posimg = intersection(ref.loc[ref[pmd] == 1]['case'].tolist(), dct['case_id'].tolist())
possld = dct[dct['case_id'].isin(posimg)]['slide_id'].tolist()
for i in negsld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 0, path + "{}/".format(pctnum), sldnum])
for i in possld:
sldnum = i.split('-')[-1]
pctnum = i[:-3]
big_images.append([pctnum, 1, path + "{}/".format(pctnum), sldnum])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'sldnum'])
return datapd
# separate into training and testing; each class is split with the same ratio at the big-image level
# test and train csv files contain tiles' path.
def set_sep(alll, path, cls, cut=0.3, batchsize=24):
trlist = []
telist = []
valist = []
CPTAC = alll
for i in range(cls):
subset = CPTAC.loc[CPTAC['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = | pd.concat(trlist) | pandas.concat |
import pandas as pd
from databalancer.paraphraseGeneratorClient import paraPharaseGenerator
from databalancer.paraphraseGeneratorClient import modelAndTokenizerInitializer
from databalancer.paraphraseInputGeneratorClient import paraphraseInputSentenceGenerator
import matplotlib.pyplot as plt
'''
Dataset balancer function
1 - identify the column names from an input dataset
2 - Find the class with maximum text count
3 - Identify the number of texts required for each class to meet the maximum value
4 - Using t5_paraphraser, generate as many texts for each class as needed to meet the maximum value
5 - Depending on the saveAsCsv value, store the balanced dataset as balanced_data.csv on the local machine or return the balanced pandas
dataframe to the user
'''
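# Illustrative usage sketch (assumptions: "my_dataset.csv" is a placeholder file whose first column holds the
# texts and whose second column holds the class labels, as expected by balanceDataset below):
# balanced_df = balanceDataset("my_dataset.csv", saveAsCsv=False)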
def balanceDataset(dataset_name,saveAsCsv=True,pretrained_model="ramsrigouthamg/t5_paraphraser",pretrained_tokenizer="t5-base",seed=42):
data = pd.read_csv(dataset_name)
model,tokenizer,device = modelAndTokenizerInitializer(pretrained_model,pretrained_tokenizer,seed)
columnList = list()
for col in data.columns:
columnList.append(col)
text_column = columnList[0]
class_column = columnList[1]
dataOriginal = data
value_dict = data[class_column].value_counts().to_dict()
balanced_flag = len(list(set(list(value_dict.values())))) == 1
print("Balancing started ")
iteration_count = 0
while not (balanced_flag):
iteration_count += 1
print("Balancing iteration " + str(iteration_count) + "...")
balanceCountDict = dict()
max_key = max(value_dict, key=value_dict.get)
max_count = value_dict[max_key]
value_dict.pop(max_key)
for key, value in value_dict.items():
balanceCountDict[key] = max_count - value
for key, value in balanceCountDict.items():
if (value != 0):
inputSentenceList = paraphraseInputSentenceGenerator(data,class_column,text_column,key)
if (value < 5):
each_para_count = 1
inputSentenceList = inputSentenceList[:value]
else:
each_para_count = int(value / 5)
paraQuestionlist = []
for sentence in inputSentenceList:
                    # accumulate paraphrases from every seed sentence (plain assignment would keep only the
                    # last sentence's output); assumes paraPharaseGenerator returns a list of paraphrased texts
                    paraQuestionlist += paraPharaseGenerator(sentence,each_para_count,model,tokenizer,device)
paraFrame = {
text_column: paraQuestionlist,
class_column: key
}
each_df = pd.DataFrame(paraFrame, columns=[text_column, class_column])
dataOriginal = | pd.concat([dataOriginal, each_df], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 02 16:27:16 2017
@author: daniel
"""
import Tomography as tom
import quPy as qp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import json
import io
dataNN=np.loadtxt("foersterdefect_n_n.tsv")
dataNN_2=np.loadtxt("foersterdefect_n_n-2.tsv")
plot_dict={}
f=plt.figure()
#plt.subplot(121)
#plt.plot(dataNN[:,0], dataNN[:,1]/1e9, marker='o',ls='',color='b')
#plt.plot(dataNN[:,0], dataNN[:,2]/1e9, marker='o',ls='',color='r')
#plt.plot(dataNN[:,0], dataNN[:,3]/1e9, marker='o',ls='',color='g')
#plt.plot(dataNN[:,0], dataNN[:,4]/1e9, marker='o',ls='',color='orange')
#plt.xlim((45,120))
#plt.ylim((-4,0))
h=pd.DataFrame(index=dataNN[:,0],data=dataNN[:,1]/1e9)
h2=pd.DataFrame(index=dataNN[:,0],data=dataNN[:,2]/1e9)
h3=pd.DataFrame(index=dataNN[:,0],data=dataNN[:,3]/1e9)
h4= | pd.DataFrame(index=dataNN[:,0],data=dataNN[:,4]/1e9) | pandas.DataFrame |
import re
import pandas as pd
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import numpy as np
import seaborn as sns; sns.set()
from scipy.spatial.distance import squareform
from scipy.spatial.distance import pdist, euclidean
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime, timedelta
from io import StringIO, BytesIO
from app.models import Country, CountryStatus
import base64
import plotly.figure_factory as ff
data_dir = 'data/'
def get_all_places(level='countries'):
# df_places = pd.read_csv(data_dir + 'all_{}_compare.csv'.format(level))
df_places = Country.all_countries_names_as_df()
return list(df_places['Name'])
def get_all_countries_response():
df_places = pd.read_csv(data_dir + 'all_countries_response.csv')
return list(df_places['Country'])
def get_df_similar_places(place, level = 'countries'):
# if level == 'cities':
# df_sim = pd.read_csv(data_dir + 'all_{}_similarity.csv'.format(level))
# df_sim = df_sim[df_sim['CityBase'] == place]
# df_sim = df_sim[['Name', 'gap', 'dist', 'Similarity']].set_index('Name')
# return df_sim
# df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
df_orig = Country.all_countries_as_df()
df_orig_piv_day = df_orig.pivot(index='Name', columns='Day', values='TotalDeaths')
df_orig_piv_day = df_orig_piv_day.fillna(0)
sr_place = df_orig_piv_day.loc[place,]
place_start = (sr_place > 0).idxmax()
# place_start_cases = (df_orig.set_index('Name').loc[place,].set_index('Day')['Total'] > 0).idxmax()
days_ahead = 14 #if level == 'countries' else 5
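    # only keep places whose death curve had already started at least `days_ahead` days before this place's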
df_places_ahead = df_orig_piv_day[df_orig_piv_day.loc[:, max(place_start - days_ahead,0)] > 0.0]
df_places_rate_norm = df_orig_piv_day.loc[df_places_ahead.index, :]
# df_places_rate_norm = df_orig_piv_day.loc[['France', 'Italy'], :]
df_places_rate_norm = df_places_rate_norm.append(df_orig_piv_day.loc[place,])
# reverse order to keep base place on top
df_places_rate_norm = df_places_rate_norm.iloc[::-1]
sr_place = df_orig_piv_day.loc[place,]
place_start = (sr_place > 0).idxmax()
sr_place_compare = sr_place.loc[place_start:].dropna()
df_places_gap = pd.DataFrame({'Name': [], 'gap': [], 'dist': []})
df_places_gap = df_places_gap.append(pd.Series([place, 0.0, -1], index=df_places_gap.columns),
ignore_index=True)
for other_place in df_places_rate_norm.index[1:]:
sr_other_place = df_places_rate_norm.loc[other_place,].fillna(0)
min_dist = np.inf
min_pos = 0
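        # slide the base place's death curve along this place's curve and keep the offset (min_pos)
        # that gives the smallest Euclidean distance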
for i in range(0, 1 + len(sr_other_place) - len(sr_place_compare)):
sr_other_place_compare = sr_other_place[i: i + len(sr_place_compare)]
dist = euclidean(sr_place_compare, sr_other_place_compare)
if (dist < min_dist):
min_dist = dist
min_pos = i
day_place2 = sr_other_place.index[min_pos]
gap = day_place2 - place_start
df_places_gap = df_places_gap.append(
pd.Series([other_place, gap, min_dist], index=df_places_gap.columns),
ignore_index=True)
df_places_gap = df_places_gap.set_index('Name')
similar_places = df_places_gap.sort_values('dist')
dist_max = euclidean(sr_place_compare, np.zeros(len(sr_place_compare)))
similar_places['Similarity'] = similar_places['dist'].apply(lambda x: (1.0 - x / dist_max) if x >= 0 else 1)
return similar_places
# get similar places based on alignment of death curves
def get_similar_places(place, level = 'countries'):
similar_places = get_df_similar_places(place, level = level)
# print(similar_places)
tuples = [tuple(x) for x in similar_places[1:8].reset_index().to_numpy()]
return tuples
# get similar places based on socioeconomic features
def get_similar_places_socio(place, level = 'countries'):
df_socio_stats_orig = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level)).drop('score', axis=1)
if not len(df_socio_stats_orig.query('Name == "{}"'.format(place))): return []
df_socio_stats_orig_piv = df_socio_stats_orig.pivot(index='Name', columns='variable')
df_socio_stats_orig_piv = df_socio_stats_orig_piv.fillna(df_socio_stats_orig_piv.mean())
scaler = MinMaxScaler() # feature_range=(-1, 1)
df_socio_stats_orig_piv_norm = pd.DataFrame(scaler.fit_transform(df_socio_stats_orig_piv),
columns=df_socio_stats_orig_piv.columns,
index=df_socio_stats_orig_piv.index)
df_dist = pd.DataFrame(squareform(pdist(df_socio_stats_orig_piv_norm)), index=df_socio_stats_orig_piv_norm.index,
columns=df_socio_stats_orig_piv_norm.index)
df_sim = df_dist.loc[:, place].to_frame(name='dist')
df_sim['similarity'] = 1 - (df_sim['dist'] / df_sim['dist'].max())
df_sim = df_sim.sort_values('similarity', ascending=False).drop('dist', axis=1)
tuples = [tuple(x) for x in df_sim[1:11].reset_index().to_numpy()]
return tuples
def get_places_by_variable(type = 'socio', level = 'countries', variable = 'Population', ascending = False):
if type == 'socio':
df_orig = pd.read_csv(data_dir + 'socio_stats_{}.csv'.format(level)).drop('score', axis=1)
else:
df_orig = pd.read_csv(data_dir + 'live_stats_{}.csv'.format(level))
# df_orig = df_orig.groupby(['Name', 'Date']).tail(1)
df_orig = df_orig[df_orig['variable'] == variable].pivot(index='Name', columns='variable', values='value').reset_index()
df_orig = df_orig[['Name', variable]].sort_values(variable, ascending = ascending).head(10)
tuples = [tuple(x) for x in df_orig.reset_index(drop=True).to_numpy()]
return tuples
def get_fig_compare_rates(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
df_places_to_show = get_place_comparison_df(place, place2, level = level, priority = priority)
fig = make_chart_comparison(df_places_to_show, level = level, scale=scale, y=y, mode=mode)
return fig
def get_html_compare_response(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
# df_places_to_show = get_place_comparison_df(place, place2, level = level, priority = priority, type = 'response')
data_dir = 'data/'
df_orig = pd.read_csv(data_dir + 'response/official_response_countries.csv', parse_dates=['Date'])
cols = list(df_orig.columns[df_orig.dtypes.eq('float64')][:15]) + ['ConfirmedDeaths']
df_orig[cols] = df_orig[cols].astype(pd.Int64Dtype())
countries = [place, place2]
df_orig = df_orig[df_orig['Name'].isin(countries)]
df_gantt = df_orig[['Name', 'Date', 'StringencyIndexForDisplay', 'ConfirmedDeaths']].rename(
columns={'Date': 'Start', 'Name': 'Task'})
df_gantt['StringencyIndexForDisplay'] = df_gantt['StringencyIndexForDisplay'].fillna(0)
df_gantt['Finish'] = df_gantt['Start'] + timedelta(days=1)
df_gantt['Description'] = df_orig.apply(lambda
x: "Stringency Index: {StringencyIndexForDisplay}<br>Confirmed Deaths: {ConfirmedDeaths}<br>School closing: {C1_School closing}<br>Workplace closing: {C2_Workplace closing}<br>Cancel public events: {C3_Cancel public events}<br>Restrictions on gatherings: {C4_Restrictions on gatherings}<br>Close public transport: {C5_Close public transport}<br>Stay at home requirements: {C6_Stay at home requirements}<br>Restrictions on internal movement: {C7_Restrictions on internal movement}<br>International travel controls: {C8_International travel controls}".format(
**x), axis=1)
df_gantt['ConfirmedDeaths'] = np.log(df_gantt['ConfirmedDeaths'])
df_gantt = df_gantt.replace([-np.inf], 0)
df_gantt['DeathsNorm'] = 0.7 * (df_gantt['ConfirmedDeaths'] - df_gantt['ConfirmedDeaths'].min()) / (
df_gantt['ConfirmedDeaths'].max() - df_gantt['ConfirmedDeaths'].min()) - 0.35
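    # min-max scale the log death counts into a +/-0.35 band so each curve fits inside its own Gantt row
    # (the first country's curve is then shifted up by 1 to sit on the upper row)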
df_gantt_c1 = df_gantt[df_gantt['Task'] == place]
df_gantt_c1['DeathsNorm'] = df_gantt_c1['DeathsNorm'] + 1
df_gantt_c2 = df_gantt[df_gantt['Task'] == place2]
fig = make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level = level, scale=scale, y=y, mode=mode)
return fig
def get_html_compare_response_econ(place, place2, level = 'countries', scale='log', y='total', mode='static', priority = 'now'):
# df_places_to_show = get_place_comparison_df(place, place2, level = level, priority = priority, type = 'response')
data_dir = 'data/'
df_orig = pd.read_csv(data_dir + 'response/official_response_economic_countries.csv', parse_dates=['Date'])
# cols = list(df_orig.columns[df_orig.dtypes.eq('float64')][:15]) + ['ConfirmedDeaths']
# df_orig[cols] = df_orig[cols].astype(pd.Int64Dtype())
countries = [place, place2]
df_orig = df_orig[df_orig['Name'].isin(countries)]
df_gantt = df_orig[['Name', 'Date', 'EconomicSupportIndexForDisplay', 'ConfirmedDeaths', 'Description']].rename(
columns={'Date': 'Start', 'Name': 'Task'})
df_gantt['EconomicSupportIndexForDisplay'] = df_gantt['EconomicSupportIndexForDisplay'].fillna(0)
df_gantt['Finish'] = df_gantt['Start'] + timedelta(days=1)
df_gantt['ConfirmedDeaths'] = np.log(df_gantt['ConfirmedDeaths'])
df_gantt = df_gantt.replace([-np.inf], 0)
df_gantt['DeathsNorm'] = 0.7 * (df_gantt['ConfirmedDeaths'] - df_gantt['ConfirmedDeaths'].min()) / (
df_gantt['ConfirmedDeaths'].max() - df_gantt['ConfirmedDeaths'].min()) - 0.35
df_gantt_c1 = df_gantt[df_gantt['Task'] == place]
df_gantt_c1['DeathsNorm'] = df_gantt_c1['DeathsNorm'] + 1
df_gantt_c2 = df_gantt[df_gantt['Task'] == place2]
fig = make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level = level, scale=scale, y=y, mode=mode, var='EconomicSupportIndexForDisplay')
return fig
def get_fig_compare_doubling_rates(place, place2, level = 'countries'):
df_places_to_show = get_place_comparison_df(place, place2, level = level)
fig = make_chart_comparison_growth(df_places_to_show, level = level)
return fig
def get_fig_response(country):
df_orig_response = pd.read_csv(data_dir + 'pollution_countries_raw.csv', parse_dates=['Date'])
df_orig_cases = pd.read_csv(data_dir + 'total_cases_countries_normalized.csv', parse_dates=['Date']).rename(
columns={'Name': 'Country'})
df_orig = pd.merge(df_orig_response, df_orig_cases, how='left')
df_to_show = df_orig[df_orig['Country'] == country][['Country', 'City', 'Date', 'no2', 'TotalDeaths']].sort_values('Date')
deaths_start = 10
start_deaths = (df_to_show['TotalDeaths'] >= deaths_start).idxmax()
avg_before_deaths = df_to_show.loc[:start_deaths, 'no2'].mean()
start_display = max(start_deaths - 60, 0)
df_to_show = df_to_show.loc[start_display:, ]
df_to_show['no2'] = df_to_show[['no2']].rolling(5).mean()
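    # smooth the NO2 series with a 5-day rolling mean; the displayed window starts up to 60 days before the 10th death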
fig = make_chart_response(country, deaths_start, avg_before_deaths, df_to_show)
return fig
def get_places_gap_df(df_orig, place, place2, priority = 'now'):
df_places_gap = pd.DataFrame({'Name': [], 'gap': [], 'dist': []})
df_places_gap = df_places_gap.append(pd.Series([place, 0.0, -1], index=df_places_gap.columns),
ignore_index=True)
df_orig = df_orig.set_index('Name')
if not ((df_orig.loc[place,'TotalDeaths'].max()>0) and (df_orig.loc[place2,'TotalDeaths'].max()>0)):
# one of the places has 0 deaths
min_dist = 0 # means nothing here
dist_max = 1 # means nothing here
gap = 0
elif priority != 'now':
# must align based on beginning of deaths
day_place = (df_orig.loc[place,:].set_index('Day')['TotalDeaths'] > 10).idxmax()
day_place2 = (df_orig.loc[place2,:].set_index('Day')['TotalDeaths'] > 10).idxmax()
min_dist = 0 # means nothing here
dist_max = 1 # means nothing here
gap = day_place2 - day_place
else:
# similarity alignment
df_orig_piv_day = df_orig.reset_index().pivot(index='Name', columns='Day', values='TotalDeaths')
sr_place = df_orig_piv_day.loc[place,]
place_start = (sr_place > 0).idxmax()
sr_place_compare = sr_place.loc[place_start:].dropna()
sr_other_place = df_orig_piv_day.loc[place2,].fillna(0)
min_dist = np.inf
min_pos = 0
for i in range(0, 1 + len(sr_other_place) - len(sr_place_compare)):
sr_other_place_compare = sr_other_place[i: i + len(sr_place_compare)]
dist = euclidean(sr_place_compare, sr_other_place_compare)
if (dist < min_dist):
min_dist = dist
min_pos = i
dist_max = euclidean(sr_place_compare, np.zeros(len(sr_place_compare)))
day_place2 = sr_other_place.index[min_pos]
# gap = min_pos - place_start
gap = day_place2 - place_start
df_places_gap = df_places_gap.append(
pd.Series([place2, gap, min_dist], index=df_places_gap.columns),
ignore_index=True)
df_places_gap = df_places_gap.set_index('Name')#.sort_values('dist')
df_places_gap['Similarity'] = df_places_gap['dist'].apply(lambda x: (1.0 - x / dist_max) if x >= 0 else 1)
return df_places_gap
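# Hedged sketch (illustration only, not used by the app): the sliding-window alignment
# performed in get_places_gap_df above, on two invented death curves. `euclidean` is the
# same scipy.spatial.distance function relied on by the code above.
def _alignment_demo():
    import numpy as np
    from scipy.spatial.distance import euclidean
    short = np.array([1.0, 3.0, 7.0])                    # place with the shorter death history
    long_ = np.array([0.0, 1.0, 3.0, 7.0, 15.0, 30.0])   # place further along its curve
    dists = [euclidean(short, long_[i:i + len(short)])
             for i in range(1 + len(long_) - len(short))]
    return int(np.argmin(dists))                          # offset ("gap") with the closest match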
def get_total_cases_df_adjusted(df_orig, df_places_gap, place, place2):
df_total_cases = df_orig.set_index('Name')
df_total_cases_top = df_total_cases.join(df_places_gap)
df_total_cases_top['DayAdj'] = ((df_total_cases_top['Day'] - df_total_cases_top['gap']) - 1).astype(int)
# df_total_cases_top.loc[place2, 'DayAdj'] = ((df_total_cases_top.loc[place2, 'Day'] - df_total_cases_top.loc[place2, 'gap']) - 1)
# df_total_cases_top['DayAdj'] = df_total_cases_top['DayAdj'].astype(int)
return df_total_cases_top
def get_place_comparison_df(place, place2, level = 'countries', priority = 'now'):
# df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
df_orig = Country.all_countries_as_df()
# to force place order
df_orig_c1 = df_orig[df_orig['Name'] == place]
df_orig_c2 = df_orig[df_orig['Name'] == place2]
len_c1 = len(df_orig_c1[df_orig_c1['TotalDeaths'] > 0])
len_c2 = len(df_orig_c2[df_orig_c2['TotalDeaths'] > 0])
    # place has to be the one with fewer days of recorded deaths
if (len_c1 > len_c2):
place, place2 = place2, place
df_orig = pd.concat([df_orig_c2, df_orig_c1])
else:
df_orig = pd.concat([df_orig_c1, df_orig_c2])
df_countries_gap = get_places_gap_df(df_orig, place, place2, priority)
df_total_cases_top = get_total_cases_df_adjusted(df_orig, df_countries_gap, place, place2)
place_start_cases = (df_orig.set_index('Name').loc[place,].set_index('Day')['Total'] > 0).idxmax()
df_total_cases_top = df_total_cases_top[df_total_cases_top['DayAdj'] >= place_start_cases]
return df_total_cases_top.reset_index()
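# Hedged usage sketch (place names are arbitrary examples, not taken from the data):
#   df_cmp = get_place_comparison_df('Brazil', 'Italy', level='countries', priority='now')
# The returned frame contains both places on a shared, shifted 'DayAdj' axis; this is the
# output that get_fig_compare_doubling_rates above feeds to make_chart_comparison_growth.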
def make_chart_comparison(df_places_to_show, level='countries', scale='log', y='total', mode='static'):
    week = mdates.WeekdayLocator(interval=2)  # tick every other week
months = mdates.MonthLocator() # every month
month_fmt = mdates.DateFormatter('%b-%d')
var_y_suffix = '' if y == 'total' else 'Per100k'
label_y_scale = ' (log)' if scale == 'log' else ''
label_y_y = '' if y == 'total' else ' per 100k'
# get last date from dataframe
date = df_places_to_show['Date'].max() # datetime.today().strftime('%Y-%m-%d')
gap = int(df_places_to_show['gap'].min())
y_lim = df_places_to_show['Total' + var_y_suffix].max() #* 1.2
# Generate the figure **without using pyplot**.
fig = Figure(figsize=(8, 5))
ax = fig.subplots()
places_to_show = df_places_to_show['Name'].unique()[:2]
place_name = 'Country' if level == 'countries' else 'City'
df_places_to_show = df_places_to_show.rename(columns={'Name': place_name})
ax.set_title('{} Comparison - COVID-19 Cases vs. Deaths - {}'.format(place_name, date), fontsize=14)
sns.scatterplot(x="DayAdj", y='Total' + var_y_suffix, hue=place_name, lw=6, alpha=0.8, data=df_places_to_show,
ax=ax)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(month_fmt)
ax.legend(loc='upper left', title="Confirmed cases", frameon=True)
ax.set(ylabel='Total confirmed cases{}{}'.format(label_y_y, label_y_scale),
xlabel="Date for {} ({}'s data shifted {} days to align death curves)".format(places_to_show[0],
places_to_show[1], gap))
ax.set_ylim(0.5, y_lim) if scale == 'log' else ax.set_ylim(-5, y_lim)
ax2 = ax.twinx()
if scale == 'log':
ax.set_yscale('log')
ax2.set_yscale('log')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
ax2.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
ax2.grid(False)
sns.lineplot(x="DayAdj", y='TotalDeaths' + var_y_suffix, hue=place_name, alpha=0.7, lw=6, ax=ax2,
data=df_places_to_show)
ax2.legend(loc='lower right', title="Deaths", frameon=True)
ax2.set(ylabel='Total deaths{}{}'.format(label_y_y, label_y_scale))
ax2.set_ylim(0.5, y_lim) if scale == 'log' else ax2.set_ylim(-5, y_lim)
logo = plt.imread('./static/img/new_logo_site.png')
ax.figure.figimage(logo, 95, 70, alpha=.35, zorder=1)
fig.tight_layout()
# display(fig)
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
return buf
def make_chart_response_comparison(df_gantt_c1, df_gantt_c2, level='countries', scale='log', y='total', mode='static', var='StringencyIndexForDisplay'):
# to force place order
df_gantt = pd.concat([df_gantt_c1, df_gantt_c2])
fig = ff.create_gantt(df_gantt, colors=['#93e4c1', '#333F44'], index_col=var,
show_colorbar=False, bar_width=0.2, showgrid_x=True, showgrid_y=True, group_tasks=True,
title='Comparing response',
height=350
)
fig.add_scatter(x=df_gantt_c1['Start'], y=df_gantt_c1['DeathsNorm'], hoverinfo='skip',
line=dict(color='rgb(222, 132, 82)', width=4))
fig.add_scatter(x=df_gantt_c2['Start'], y=df_gantt_c2['DeathsNorm'], hoverinfo='skip',
line=dict(color='rgb(222, 132, 82)', width=4))
fig.update_layout(
xaxis=dict(
showline=True,
showgrid=False,
showticklabels=True,
linecolor='rgb(204, 204, 204)',
linewidth=2,
ticks='outside',
tickfont=dict(
family='Arial',
size=12,
color='rgb(82, 82, 82)',
),
type="date"
),
yaxis=dict(
showgrid=False,
zeroline=False,
showline=False,
showticklabels=True,
autorange=True,
),
autosize=False,
margin=dict(
autoexpand=False,
l=100,
r=20,
t=110,
),
showlegend=False,
plot_bgcolor='white'
)
annotations = []
annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.13,
xanchor='center', yanchor='top',
text='Date',
font=dict(family='Arial',
size=12,
color='rgb(150,150,150)'),
showarrow=False))
fig.update_layout(annotations=annotations)
# fig.write_html("gantt.html")
# fig.show()
html = fig.to_html(full_html=False, include_plotlyjs=False, )
return html
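# Note added for clarity (inferred from the calls above, not part of the original file):
# each Gantt frame passed in is expected to carry at least the columns
# 'Task', 'Start', 'Finish', 'DeathsNorm' and the index column named by `var`
# (e.g. 'EconomicSupportIndexForDisplay'), as assembled by the caller that builds df_gantt.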
def make_chart_comparison_growth(df_places_to_show, level='countries'):
# get last date from dataframe
date = df_places_to_show['Date'].max() # datetime.today().strftime('%Y-%m-%d')
gap = int(df_places_to_show['gap'].min())
# Generate the figure **without using pyplot**.
fig = Figure(figsize=(8, 6))
axs = fig.subplots(nrows=2)
place_name = 'Country' if level == 'countries' else 'City'
axs[0].set_title('{} Comparison - COVID-19 Weekly Growth (%) - {}'.format(place_name, date), fontsize=14)
places_to_show = df_places_to_show['Name'].unique()[:2]
df_places_to_show = df_places_to_show.rename(columns={'Name': place_name})
sns.lineplot(x="DayAdj", y='WeeklyGrowth', hue=place_name, lw = 6, alpha = 0.8, ax=axs[0], data=df_places_to_show)
axs[0].set(ylabel='Weekly growth of cases', xlabel='')
axs[0].set_ylim(0, 500)
sns.lineplot(x="DayAdj", y='WeeklyGrowthDeaths', hue=place_name, alpha = 0.7, lw = 6, ax=axs[1], data=df_places_to_show)
axs[1].set(ylabel='Weekly growth of deaths', xlabel="Day ({}'s data shifted {} days for the death curves to align)".format(places_to_show[1], gap))
axs[1].set_ylim(0, 500)
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
return buf
def make_chart_response(country, deaths_start, avg_before_deaths, df_to_show):
city = df_to_show['City'].iloc[0]
df_quar = pd.read_csv(data_dir + 'all_countries_response.csv', parse_dates = ['Quarantine'])
quarantine = df_quar[df_quar['Country'] == country]['Quarantine'].iloc[0]
    week = mdates.WeekdayLocator(interval=2)  # tick every other week
months = mdates.MonthLocator() # every month
month_fmt = mdates.DateFormatter('%b-%d')
y_lim = df_to_show['TotalDeaths'].max() * 1.2
y2_lim = df_to_show['no2'].max() * 1.8
# Generate the figure **without using pyplot**.
fig = Figure(figsize=(10, 5))
ax = fig.subplots()
ax.set_title('Assessing quarantine implementation - ' + country, fontsize=16, loc='left')
if not pd.isnull(quarantine): ax.axvline(x=quarantine, color='k', linestyle='--', lw=3, label='Official quarantine')
ax.scatter(df_to_show['Date'], df_to_show['TotalDeaths'], color='black', alpha = 0.7, label = 'Confirmed deaths')
ax.xaxis.set_major_locator(week)
ax.xaxis.set_major_formatter(month_fmt)
ax.set_yscale('log')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
ax.set_ylim(1, y_lim)
ax.set(ylabel='Confirmed deaths')
ax2 = ax.twinx()
sns.lineplot(x="Date", y='no2', alpha = 0.7, lw = 6, label = 'Daily $\mathrm{{NO}}_2$ pollution *', ax=ax2, data=df_to_show)
sns.lineplot(x="Date", y=avg_before_deaths, alpha = 0.7, lw = 6, label = 'Average pollution **', ax=ax2, data=df_to_show)
ax2.grid(False)
ax2.xaxis.set_major_locator(week)
ax2.xaxis.set_major_formatter(month_fmt)
ax2.set_ylim(1, y2_lim)
ax2.set(ylabel='$\mathrm{{NO}}_2$ pollution')
# ask matplotlib for the plotted objects and their labels
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='upper left')
    annotation = """* Median of $\mathrm{{NO}}_2$ measurements in the most affected city ({city}), 5-day rolling average over the time series\n** Average daily $\mathrm{{NO}}_2$ measurements from the beginning of 2020 until the first day after {deaths_start} deaths""".format(city=city, deaths_start = deaths_start)
ax.annotate(annotation, (0,0), (0, -30), xycoords='axes fraction', textcoords='offset points', va='top')
logo = plt.imread('./static/img/new_logo_site.png')
ax.figure.figimage(logo, 100, 110, alpha=.35, zorder=1)
fig.tight_layout()
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
return buf
def get_timeline_list(place, place2, level = 'countries'):
# df_orig = pd.read_csv(data_dir + 'total_cases_{}_normalized.csv'.format(level))
df_orig = Country.all_countries_as_df()
# to force place order
df_orig_c1 = df_orig[df_orig['Name'] == place]
df_orig_c2 = df_orig[df_orig['Name'] == place2]
len_c1 = len(df_orig_c1[df_orig_c1['TotalDeaths'] > 0])
len_c2 = len(df_orig_c2[df_orig_c2['TotalDeaths'] > 0])
    # place has to be the one with fewer days of recorded deaths
if (len_c1 > len_c2):
place, place2 = place2, place
df_orig = pd.concat([df_orig_c2, df_orig_c1])
else:
df_orig = pd.concat([df_orig_c1, df_orig_c2])
df_places_gap = get_places_gap_df(df_orig, place, place2)
df_total_cases_top = get_total_cases_df_adjusted(df_orig, df_places_gap, place, place2)
places = [place, place2]
df_places_to_show = df_total_cases_top.loc[places, :]
places_to_show = list(df_places_to_show.index.unique())
df_events_owd = pd.DataFrame({'Date': [], 'Name': [], 'Desc': [], 'FullText': [], 'Highlight': []})
today = df_places_to_show['Date'].max()
for c in places_to_show:
df_place = df_places_to_show.loc[c,]
        # df_events_owd = df_events_owd.append(pd.DataFrame({'Date':['2019-12-31'], 'Name': [c], 'Desc':['Beginning of epidemic'], 'FullText':['First day of data tracking.']}))
df_events_owd = df_events_owd.append(
pd.Series([(df_place.set_index('Date')['Total'] > 0).idxmax(), c, '1st Confirmed Case', '', 1],
index=df_events_owd.columns), ignore_index=True)
df_events_owd = df_events_owd.append(
pd.Series([(df_place.set_index('Date')['TotalDeaths'] > 0).idxmax(), c, '1st Death', '', 5],
index=df_events_owd.columns), ignore_index=True)
msg = """{} is approximately {} days behind {}'s epidemic progression.
This is an estimate based on matching their death growth curves.""".format(place, abs(
df_places_gap.loc[place2, 'gap']), place2)
df_events_owd = df_events_owd.append(pd.Series([today, c, 'Today', msg, 1], index=df_events_owd.columns),
ignore_index=True)
df_events_owd['Source'] = 'Our World in Data'
# Adding data from Situation Reports
if level == 'countries':
df_events_sr = pd.read_csv(data_dir + 'situation_reports_countries_highlight.csv')
else:
df_events_sr = pd.DataFrame({'Name':[]})
df_events_sr = df_events_sr[df_events_sr['Name'].isin([place, place2])]
df_events = | pd.concat([df_events_owd, df_events_sr], sort=True) | pandas.concat |
"""
Combines medication statistics for various sublocalizations.
"""
import pandas as pd
from click import *
from logging import *
from typing import *
def load_data(path: str, sublocalization: str) -> pd.DataFrame:
"""
Loads data from the given path and with the given sublocalization.
Args:
path: the path to load data from.
sublocalization: the sublocalization to assign to the loaded data.
"""
debug(f'Loading {path!r}')
result = pd.read_csv(path)
debug(f'Result: {result.shape}')
result['sublocalization'] = sublocalization
return result
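# Hedged usage sketch (file names below are hypothetical, not part of this project):
#   frames = [load_data('stats_oral.csv', 'oral'),
#             load_data('stats_nasal.csv', 'nasal')]
#   combined = pd.concat(frames)  # mirrors what main() does with paired --input/--sublocalization options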
@command()
@option(
'--input',
required=True,
multiple=True,
help='the CSV files to read inputs from')
@option(
'--sublocalization',
required=True,
multiple=True,
help='the sublocalizations to assign to each input')
@option('--output', required=True, help='the Excel file to write output to')
def main(input: Tuple[str], sublocalization: Tuple[str], output: str):
if len(input) != len(sublocalization):
raise UsageError(
'number of --inputs must match the number of --sublocalizations')
basicConfig(level=DEBUG)
info('Loading data')
data = [load_data(i, s) for i, s in zip(input, sublocalization)]
info('Concatenating data')
data = | pd.concat(data) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, to_timedelta
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.expressions as expr
except ImportError:
import pandas.computation.expressions as expr
try:
import pandas.tseries.holiday
except ImportError:
pass
class IntFrameWithScalar:
params = [
[np.float64, np.int64],
[2, 3.0, np.int32(4), np.float64(5)],
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.pow,
operator.mod,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
],
]
param_names = ["dtype", "scalar", "op"]
def setup(self, dtype, scalar, op):
arr = np.random.randn(20000, 100)
self.df = DataFrame(arr.astype(dtype))
def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
class OpWithFillValue:
def setup(self):
# GH#31300
arr = np.arange(10 ** 6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
def time_frame_op_with_fill_value_no_nas(self):
self.df.add(self.df, fill_value=4)
def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
class MixedFrameWithSeriesAxis:
params = [
[
"eq",
"ne",
"lt",
"le",
"ge",
"gt",
"add",
"sub",
"truediv",
"floordiv",
"mul",
"pow",
]
]
param_names = ["opname"]
def setup(self, opname):
arr = np.arange(10 ** 6).reshape(1000, -1)
df = DataFrame(arr)
df["C"] = 1.0
self.df = df
self.ser = df[0]
self.row = df.iloc[0]
def time_frame_op_with_series_axis0(self, opname):
getattr(self.df, opname)(self.ser, axis=0)
def time_frame_op_with_series_axis1(self, opname):
getattr(operator, opname)(self.df, self.ser)
class Ops:
params = [[True, False], ["default", 1]]
param_names = ["use_numexpr", "threads"]
def setup(self, use_numexpr, threads):
self.df = DataFrame(np.random.randn(20000, 100))
self.df2 = DataFrame(np.random.randn(20000, 100))
if threads != "default":
expr.set_numexpr_threads(threads)
if not use_numexpr:
expr.set_use_numexpr(False)
def time_frame_add(self, use_numexpr, threads):
self.df + self.df2
def time_frame_mult(self, use_numexpr, threads):
self.df * self.df2
def time_frame_multi_and(self, use_numexpr, threads):
self.df[(self.df > 0) & (self.df2 > 0)]
def time_frame_comparison(self, use_numexpr, threads):
self.df > self.df2
def teardown(self, use_numexpr, threads):
expr.set_use_numexpr(True)
expr.set_numexpr_threads()
class Ops2:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
self.df_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.df2_int = DataFrame(
np.random.randint(
np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)
)
)
self.s = Series(np.random.randn(N))
# Division
def time_frame_float_div(self):
self.df // self.df2
def time_frame_float_div_by_zero(self):
self.df / 0
def time_frame_float_floor_by_zero(self):
self.df // 0
def time_frame_int_div_by_zero(self):
self.df_int / 0
# Modulo
def time_frame_int_mod(self):
self.df_int % self.df2_int
def time_frame_float_mod(self):
self.df % self.df2
# Dot product
def time_frame_dot(self):
self.df.dot(self.df2)
def time_series_dot(self):
self.s.dot(self.s)
def time_frame_series_dot(self):
self.df.dot(self.s)
class Timeseries:
params = [None, "US/Eastern"]
param_names = ["tz"]
def setup(self, tz):
N = 10 ** 6
halfway = (N // 2) - 1
self.s = Series(date_range("20010101", periods=N, freq="T", tz=tz))
self.ts = self.s[halfway]
self.s2 = Series(date_range("20010101", periods=N, freq="s", tz=tz))
def time_series_timestamp_compare(self, tz):
self.s <= self.ts
def time_timestamp_series_compare(self, tz):
self.ts >= self.s
def time_timestamp_ops_diff(self, tz):
self.s2.diff()
def time_timestamp_ops_diff_with_shift(self, tz):
self.s - self.s.shift()
class IrregularOps:
def setup(self):
N = 10 ** 5
idx = date_range(start="1/1/2000", periods=N, freq="s")
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
def time_add(self):
self.left + self.right
class TimedeltaOps:
def setup(self):
self.td = to_timedelta(np.arange(1000000))
self.ts = Timestamp("2000")
def time_add_td_ts(self):
self.td + self.ts
class CategoricalComparisons:
params = ["__lt__", "__le__", "__eq__", "__ne__", "__ge__", "__gt__"]
param_names = ["op"]
def setup(self, op):
N = 10 ** 5
self.cat = pd.Categorical(list("aabbcd") * N, ordered=True)
def time_categorical_op(self, op):
getattr(self.cat, op)("b")
class IndexArithmetic:
params = ["float", "int"]
param_names = ["dtype"]
def setup(self, dtype):
N = 10 ** 6
indexes = {"int": "makeIntIndex", "float": "makeFloatIndex"}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class NumericInferOps:
# from GH 7332
params = numeric_dtypes
param_names = ["dtype"]
def setup(self, dtype):
N = 5 * 10 ** 5
self.df = DataFrame(
{"A": np.arange(N).astype(dtype), "B": np.arange(N).astype(dtype)}
)
def time_add(self, dtype):
self.df["A"] + self.df["B"]
def time_subtract(self, dtype):
self.df["A"] - self.df["B"]
def time_multiply(self, dtype):
self.df["A"] * self.df["B"]
def time_divide(self, dtype):
self.df["A"] / self.df["B"]
def time_modulo(self, dtype):
self.df["A"] % self.df["B"]
class DateInferOps:
# from GH 7332
def setup_cache(self):
N = 5 * 10 ** 5
df = DataFrame({"datetime64": np.arange(N).astype("datetime64[ms]")})
df["timedelta"] = df["datetime64"] - df["datetime64"]
return df
def time_subtract_datetimes(self, df):
df["datetime64"] - df["datetime64"]
def time_timedelta_plus_datetime(self, df):
df["timedelta"] + df["datetime64"]
def time_add_timedeltas(self, df):
df["timedelta"] + df["timedelta"]
class AddOverflowScalar:
params = [1, -1, 0]
param_names = ["scalar"]
def setup(self, scalar):
N = 10 ** 6
self.arr = np.arange(N)
def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
class AddOverflowArray:
def setup(self):
N = 10 ** 6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
self.arr_mixed = np.array([1, -1]).repeat(N / 2)
self.arr_nan_1 = np.random.choice([True, False], size=N)
self.arr_nan_2 = np.random.choice([True, False], size=N)
def time_add_overflow_arr_rev(self):
checked_add_with_arr(self.arr, self.arr_rev)
def time_add_overflow_arr_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1)
def time_add_overflow_b_mask_nan(self):
checked_add_with_arr(self.arr, self.arr_mixed, b_mask=self.arr_nan_1)
def time_add_overflow_both_arg_nan(self):
checked_add_with_arr(
self.arr, self.arr_mixed, arr_mask=self.arr_nan_1, b_mask=self.arr_nan_2
)
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
# These offsets currently raise a NotImplementedError with .apply_index()
non_apply = [
pd.offsets.Day(),
pd.offsets.BYearEnd(),
pd.offsets.BYearBegin(),
pd.offsets.BQuarterEnd(),
pd.offsets.BQuarterBegin(),
pd.offsets.BMonthEnd(),
pd.offsets.BMonthBegin(),
pd.offsets.CustomBusinessDay(),
pd.offsets.CustomBusinessDay(calendar=hcal),
pd.offsets.CustomBusinessMonthBegin(calendar=hcal),
pd.offsets.CustomBusinessMonthEnd(calendar=hcal),
pd.offsets.CustomBusinessMonthEnd(calendar=hcal),
]
other_offsets = [
pd.offsets.YearEnd(),
| pd.offsets.YearBegin() | pandas.offsets.YearBegin |
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestDataFrame(unittest.TestCase):
def setUp(self):
data_null = np.array([
["007", 1, 1, 2.0, True],
[None, 2, 2, None, True],
["12", None, 4, 2.0, False],
["1312", 0, None, 1.2, None],
])
self.df_null = pd.DataFrame({
"f_string": data_null[:, 0],
"f_long": data_null[:, 1],
"f_int": data_null[:, 2],
"f_double": data_null[:, 3],
"f_boolean": data_null[:, 4]
})
data = np.array([
["a", 1, 1, 2.0, True],
["abc", 2, 2, 2.4, True],
["c", 4, 4, 2.0, False],
["a", 0, 1, 1.2, False],
])
self.df = pd.DataFrame({
"f_string": data[:, 0],
"f_long": data[:, 1],
"f_int": data[:, 2],
"f_double": data[:, 3],
"f_boolean": data[:, 4]
})
def test_memory_null(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df_null, schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, pd.Int64Dtype())
self.assertEqual(df2['f_int'].dtype, pd.Int32Dtype())
self.assertEqual(df2['f_double'].dtype, np.float64)
self.assertEqual(df2['f_boolean'].dtype, pd.BooleanDtype())
def test_memory(self):
from pyalink.alink.config import g_config
g_config["collect_storage_type"] = "memory"
schema = "f_string string,f_long long,f_int int,f_double double,f_boolean boolean"
op = dataframeToOperator(self.df, schemaStr=schema, op_type="batch")
col_names = op.getColNames()
col_types = op.getColTypes()
self.assertEqual(col_names[0], "f_string")
self.assertEqual(col_names[1], "f_long")
self.assertEqual(col_names[2], "f_int")
self.assertEqual(col_names[3], "f_double")
self.assertEqual(col_names[4], "f_boolean")
self.assertEqual(col_types[0], "VARCHAR")
self.assertEqual(col_types[1], "BIGINT")
self.assertEqual(col_types[2], "INT")
self.assertEqual(col_types[3], "DOUBLE")
self.assertEqual(col_types[4], "BOOLEAN")
df2 = op.collectToDataframe()
print(df2)
print(df2.dtypes)
self.assertEqual(df2['f_string'].dtype, pd.StringDtype())
self.assertEqual(df2['f_long'].dtype, | pd.Int64Dtype() | pandas.Int64Dtype |
import sys
sys.path.insert(0, './')
try:
import wandb
except ImportError:
pass
from rlf.exp_mgr import config_mgr
from rlf.rl.utils import CacheHelper
import yaml
import argparse
from collections import defaultdict
import pickle
import os
import os.path as osp
import pandas as pd
import hashlib
import json
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='./config.yaml')
parser.add_argument('--force-refresh', action='store_true', default=False)
return parser
def get_report_data_from_spec(spec_str, force_refresh=False, cfg='./config.yaml'):
spec = yaml.safe_load(spec_str)
return get_report_data(spec['report_name'], spec['plot_column'], spec['fields'],
force_refresh, cfg)
def get_run_params(wb_run_id):
wb_proj_name = config_mgr.get_prop('proj_name')
wb_entity = config_mgr.get_prop('wb_entity')
api = wandb.Api()
run = api.run(f"{wb_entity}/{wb_proj_name}/{wb_run_id}")
for f in run.files():
if f.name == 'wandb-metadata.json':
with f.download(replace=True) as f:
lines = f.readlines()
data_d = json.loads('\n'.join(lines))
data_d['full_name'] = run.name
return data_d
return None
def get_run_data(run_names, plot_field, method_name,
cfg='./config.yaml'):
config_mgr.init(cfg)
wb_proj_name = config_mgr.get_prop('proj_name')
wb_entity = config_mgr.get_prop('wb_entity')
all_df = None
api = wandb.Api()
for run_name in run_names:
runs = api.runs(f"{wb_entity}/{wb_proj_name}", {"config.prefix": run_name})
assert len(runs) == 1
wbrun = next(iter(runs))
df = wbrun.history(samples=15000)
df = df[['_step', plot_field]]
df['run'] = run_name
if all_df is None:
all_df = df
else:
all_df = pd.concat([all_df, df])
all_df['method'] = method_name
return all_df
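# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes a reachable W&B project configured in ./config.yaml and two runs whose
# config.prefix values are 'ppo-seed1'/'ppo-seed2'; the metric name 'avg_r' is also
# an assumption.
def _example_get_run_data():
    curves = get_run_data(['ppo-seed1', 'ppo-seed2'], 'avg_r', method_name='ppo')
    # Average of the last logged value across the two seeds.
    return curves.groupby('run')['avg_r'].last().mean()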
def get_run_ids_from_report(wb_search, report_name, get_sections, api):
reports = api.reports(wb_search)
report = None
for cur_report in reports:
id_parts = cur_report.description.split('ID:')
if len(id_parts) > 1:
cur_id = id_parts[1].split(' ')[0]
if report_name == cur_id:
report = cur_report
break
if report is None:
raise ValueError('Could not find report')
# Find which section the run sets are in
report_section_idx = None
run_sets = None
try:
for i in range(len(report.sections)):
if 'runSets' in report.sections[i]:
report_section_idx = i
break
run_sets = report.sections[report_section_idx]['runSets']
except Exception as e:
for i in range(len(report.spec['blocks'])):
spec = report.spec['blocks'][i]
if 'metadata' in spec and 'runSets' in spec['metadata']:
report_section_idx = i
break
run_sets = report.spec['blocks'][i]['metadata']['runSets']
run_ids = []
for run_set in run_sets:
report_section = run_set['name']
if report_section not in get_sections:
continue
report_runs = run_set['selections']['tree']
for run_id in report_runs:
run_ids.append((report_section, run_id))
if len(run_ids) == 0:
raise ValueError("""
Could not find runs %s from report. Check:
- There is only one section.
- The names don't have trailing spaces.
- The report is saved.
""" % str(get_sections))
return run_ids
def get_report_data(report_name, plot_field, plot_sections,
force_refresh=False, match_pat=None, other_plot_fields=[],
cfg='./config.yaml', other_fetch_fields=[], get_any_cols=False):
"""
Converts the selected data sets in a W&B report into a Pandas DataFrame.
Fetches only the plot_field you specify.
    - get_any_cols: If True, plot_field is filtered down to the subset of columns
      that are actually present in the report's runs.
"""
config_mgr.init(cfg)
wb_proj_name = config_mgr.get_prop('proj_name')
wb_entity = config_mgr.get_prop('wb_entity')
wb_search = config_mgr.get_prop('wb_search', wb_entity+'/'+wb_proj_name)
save_report_name = report_name.replace(' ', '-').replace("/", "-")
cacher = CacheHelper(f"{wb_entity}_{wb_proj_name}_{save_report_name}",
plot_sections)
all_df = None
if cacher.exists() and not force_refresh:
all_df = cacher.load()
uniq_methods = all_df['method'].unique()
for k in uniq_methods:
idx = plot_sections.index(k)
del plot_sections[idx]
if len(plot_sections) == 0:
return all_df
api = wandb.Api()
run_ids = get_run_ids_from_report(wb_search, report_name, plot_sections, api)
for report_section, run_id in run_ids:
wbrun = api.run(f"{wb_entity}/{wb_proj_name}/{run_id}")
if match_pat is not None:
any_matches = False
for x in match_pat:
if x in wbrun.name:
any_matches = True
break
if not any_matches:
continue
df = wbrun.history(samples=15000)
if not isinstance(plot_field, str):
orig_not_found = False
for k in plot_field:
if k not in df.columns:
orig_not_found = True
break
if orig_not_found:
if len(other_plot_fields) > 0:
plot_field = other_plot_fields
if get_any_cols:
plot_field = [x for x in plot_field if x in df.columns]
for k in plot_field:
if k not in df.columns:
raise ValueError((f"Requested key {k} is not present in",
f" data frame with {df.columns} for run {run_id}",
f" section {report_section}"))
df = df[['_step', *plot_field]]
else:
if plot_field not in df.columns:
match_other_plot = None
for k in other_plot_fields:
if k in df.columns:
match_other_plot = k
break
if match_other_plot is None:
raise ValueError("""
                        Could not find columns from %s in %s containing %s
""" % (str(other_plot_fields), report_section, str(df.columns)))
df = df.rename(columns={match_other_plot: plot_field})
df = df[['_step', plot_field]]
if len(other_fetch_fields) > 0:
run_cfg = json.loads(wbrun.json_config)
for k in other_fetch_fields:
parts = k.split('.')
cur_d = run_cfg
for part in parts:
cur_d = cur_d[part]
if isinstance(cur_d, dict):
cur_d = cur_d['value']
df[k] = cur_d
df['method'] = report_section
df['run'] = run_id
if all_df is None:
all_df = df
else:
all_df = | pd.concat([all_df, df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 18:07:56 2020
@author: Fabio
"""
import pandas as pd
import matplotlib.pyplot as plt
def df_filterbydate(df, dataLB, dataUB):
df['Data_Registrazione'] = pd.to_datetime(df['Data_Registrazione'], infer_datetime_format=True).dt.date
df = df[(df['Data_Registrazione'] >= dataLB) & (df['Data_Registrazione'] <= dataUB)]
return df
def get_df_classi(df, soglia=180):
    # build the class DataFrames: non-profitable == 0 / profitable == 1 / highly profitable == 2
df0 = df[df['profitto'] <= 0]
df1 = df[(df['profitto'] > 0) & (df['profitto'] <= soglia)]
df2 = df[df['profitto'] > soglia]
return df0, df1, df2
def get_classi_cliente(cliente, df, soglia):
    # extract the single customer's DataFrame, considering all of its producers
df_cliente = df[df['Produttore'] == cliente]
visit_num = len(df_cliente.index)
df0, df1, df2 = get_df_classi(df_cliente,soglia)
    class_0c = len(df0.index)  # count the non-profitable orders
    class_1c = len(df1.index)  # count the profitable orders
    class_2c = len(df2.index)  # count the highly profitable orders
class_c = [class_0c, class_1c, class_2c]
class_0p = class_0c / visit_num
class_1p = class_1c / visit_num
class_2p = class_2c / visit_num
class_p = [class_0p, class_1p, class_2p]
    class_0s = df0['profitto'].sum()  # sum the non-profitable orders
    class_1s = df1['profitto'].sum()  # sum the profitable orders
    class_2s = df2['profitto'].sum()  # sum the highly profitable orders
class_s = [class_0s, class_1s, class_2s]
    if (class_0s >= class_1s + class_2s) or (class_0p > class_1p and class_0p > class_2p):
        color = 'red'  # not profitable
        classe = 0
    elif (class_1p >= class_0p) and (class_1p >= class_2p):
        color = 'lightblue'  # profitable
        classe = 1
    elif (class_2p >= class_0p) and (class_2p >= class_1p):
        color = 'blue'  # excellent profit
        classe = 2
return class_c, class_p, class_s, color, classe
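# Hedged example (added for illustration; not in the original script). It builds a
# tiny toy DataFrame with the column names used above ('Produttore', 'profitto')
# and classifies one customer with the default 180 threshold.
def _esempio_get_classi_cliente():
    toy = pd.DataFrame({
        'Produttore': ['ACME', 'ACME', 'ACME', 'Beta'],
        'profitto': [-50.0, 120.0, 300.0, 10.0],
    })
    class_c, class_p, class_s, color, classe = get_classi_cliente('ACME', toy, 180)
    # class_c == [1, 1, 1]: one order per class, so the first elif wins and the
    # customer ends up in class 1 ('lightblue').
    return class_c, class_p, class_s, color, classe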
def get_classi_produttore(cliente, lat, long, df, soglia):
    # build a DataFrame for a single customer with the coordinates of all of its producers
df_produttore = df[(df['Produttore'] == cliente) & (df['lat_P'] == lat) & (df['long_P'] == long)]
visit_num = len(df_produttore.index)
df0, df1, df2 = get_df_classi(df_produttore, soglia)
    class_0c = df0['profitto'].count()  # count the non-profitable orders
    class_1c = df1['profitto'].count()  # count the profitable orders
    class_2c = df2['profitto'].count()  # count the highly profitable orders
class_c = [class_0c, class_1c, class_2c]
class_0p = class_0c / visit_num
class_1p = class_1c / visit_num
class_2p = class_2c / visit_num
class_p = [class_0p, class_1p, class_2p]
    class_0s = df0['profitto'].sum()  # sum the non-profitable orders
    class_1s = df1['profitto'].sum()  # sum the profitable orders
    class_2s = df2['profitto'].sum()  # sum the highly profitable orders
class_s = [class_0s, class_1s, class_2s]
    if (class_0s >= class_1s + class_2s) or (class_0p > class_1p and class_0p > class_2p):
        color = 'red'  # not profitable
        classe = 0
    elif (class_1p >= class_0p) and (class_1p >= class_2p):
        color = 'blue'  # profitable
        classe = 1
    elif (class_2p >= class_0p) and (class_2p >= class_1p):
        color = 'darkblue'  # excellent profit
        classe = 2
return class_c, class_p, class_s, color, classe
def filtro_ordine_peggiore(df, df2):  # returns each customer's worst orders within the class defined by the input df; df2 is the full DataFrame
produttori = df['Produttore'].drop_duplicates().tolist()
ordini_peggiori = []
for produttore in produttori:
""" seleziono i peggiori ordini del produttore sulla base del profitto"""
peggiore = min(df[df['Produttore'] == produttore]['profitto'])
""" estraggo gli indici dei peggiori ordini"""
peggiore_index = df[df['profitto'] == peggiore].index
""" ne estraggo il num fiscale"""
num_ordine = df2.iloc[peggiore_index]['NumFiscale'].values
""" creo una lista con produttore, num ficale e profitto dei peggiori ordini"""
ordini_peggiori.append([produttore, num_ordine, peggiore])
""" creo un df dalla lista"""
df_ordini = pd.DataFrame(data=ordini_peggiori, columns=['Produttore', 'NumFiscale', 'profitto'])
return df_ordini
def filtro_classifica(
        df):  # to be used with the df_non_prof/df_prof/df_max_prof DataFrames; returns the customer ranking based on total profit
    """ build a DataFrame sorted in descending order of profit """
classifica = df.sort_values(by='profitto', ascending=False)
profitto = classifica['profitto']
produttori = classifica['Produttore']
df_classifica = pd.DataFrame()
df_classifica['Produttore'] = produttori
df_classifica['profitto'] = profitto
return df_classifica
def best_n(df, n):
df_best = df.nlargest(n, 'profitto')
produttori_list = df_best['Produttore'].tolist()
return df_best, produttori_list
def worst_n(df, n):
df_worst = df.nsmallest(n, 'profitto')
produttori_list = df_worst['Produttore'].tolist()
return df_worst, produttori_list
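# Hedged illustration (added): best_n/worst_n return a (DataFrame, list-of-names)
# pair, so callers that only need the producer names should unpack the second
# element, as grafici() below does.
def _esempio_best_worst(df_tot):
    _, migliori = best_n(df_tot, 10)
    _, peggiori = worst_n(df_tot, 10)
    return migliori, peggiori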
def grafici(df, df2, dfn, dfp, dfo, hist, pie, best,
            worst):  # takes df, dfpneg, dfp, dfpmax as input; df2 is the producers DataFrame
    """ To pick the chart type, pass any value for hist/pie (or both) and None for the unwanted one; the same applies to choosing worst or best, but not both. """
ax_i = []
ax_t = []
"""blocco di if/elif per verificare la scelta del tipo di grafico """
if (pd.isna(hist) == False) & (pd.isna(pie) == False):
"""blocco di if/elif per verificare la scelta tra peggiori e migliori """
if pd.isna(best) == False:
produttori = best_n(df2, 10)
elif pd.isna(worst) == False:
produttori = worst_n(df2, 10)
else:
produttori = []
"""blocco di if/elif per verificare la scelta del tipo di grafico """
for produttore in produttori:
""" per l'istogramma seleziono il produttore e estraggo la serie delle commesse nel df ed eseguo il plot"""
serie = df[df['Produttore'] == produttore]['profitto']
figure, axes = plt.subplots(1, 1)
plt.title(produttore)
ax = serie.plot.hist()
plt.ylabel('numero commesse')
ax_i.append([produttore, ax])
""" per la torta seleziono il produttore ed estraggo la serie delle commesse in df_pneg, df_p e df_pmax"""
serie_neg = dfn[dfn['Produttore'] == produttore]['profitto']
serie_prof = dfp[dfp['Produttore'] == produttore]['profitto']
serie_max = dfo[dfo['Produttore'] == produttore]['profitto']
""" eseguo il plot sul numero di volte che il produttore compare nelle singole classi """
y = [len(serie_neg), len(serie_prof), len(serie_max)]
label = ['non profittevoli', 'profittevoli', 'ottimo profitto']
figure, ax = plt.subplots()
plt.title(produttore)
ax.pie(y)
plt.legend(label, loc="best")
ax_t.append([produttore, ax])
elif pd.isna(hist) == False:
        if pd.isna(best) == False:
            produttori = best_n(df2, 10)[1]
        elif pd.isna(worst) == False:
            produttori = worst_n(df2, 10)[1]
        else:
            produttori = []
for produttore in produttori:
serie = df[df['Produttore'] == produttore]['profitto']
figure, axes = plt.subplots(1, 1)
plt.title(produttore)
ax = serie.plot.hist()
plt.ylabel('numero commesse')
ax_i.append([produttore, ax])
elif | pd.isna(pie) | pandas.isna |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--format', default='forex', choices=['forex', 'stock'], help="csv format")
parser.add_argument('-s', '--source', required=True, help="Row forex text file path")
parser.add_argument('-o', '--output', required=True, help="Output data csv file path")
    parser.add_argument('-p', '--period', required=True,
                        help="Time period of the data (pandas offset alias):\n"
                             "1T: 1 minute\n"
                             "1H: 1 hour\n"
                             "1D: 1 day\n"
                             "1W: 1 week\n"
                             "1M: 1 month\n"
                             "1A: 1 year")
args = parser.parse_args()
source = args.source
output = args.output
period = args.period
df = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', source))
df['Raw_time'] = df['Day'].apply(str) + df['Time'].apply(str).apply(lambda x: x.zfill(6))
df['Time'] = pd.to_datetime(df['Raw_time'], format='%Y%m%d%H%M%S')
# df['Timestamp'] = df['Timestamp'].astype('int64') // 1e9
df['Time'] = pd.to_datetime(df['Time'], unit='s')
df.index = df['Time'].tolist()
# columns = ['Timestamp', 'Open', 'High', 'Low', 'Close', 'Volume', 'Volume_(Currency)', 'Weighted_Price']
columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume']
df_out = | pd.DataFrame(columns=columns) | pandas.DataFrame |
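    # Hedged sketch of the remaining steps (added; the original script stops right
    # after creating the empty df_out). It assumes the raw file already carries
    # Open/High/Low/Close (and optionally Volume) columns and overwrites df_out
    # with the resampled bars before writing them out.
    agg_rules = [('Open', 'first'), ('High', 'max'), ('Low', 'min'),
                 ('Close', 'last'), ('Volume', 'sum')]
    agg = {col: how for col, how in agg_rules if col in df.columns}
    df_out = df.resample(period).agg(agg).dropna(how='all').reset_index()
    df_out = df_out.rename(columns={'index': 'Time'})
    df_out.to_csv(os.path.join(os.path.dirname(__file__), 'data', output), index=False)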
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney - quotes homepage - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai/Shenzhen/Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
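# Hedged usage sketch (added; not part of the original module). It assumes the
# Eastmoney endpoint is reachable and simply keeps the five largest gainers from
# the real-time A-share snapshot.
def _demo_stock_zh_a_spot_em():
    spot = stock_zh_a_spot_em()
    if spot.empty:
        return spot
    return spot.nlargest(5, "涨跌幅")[["代码", "名称", "最新价", "涨跌幅"]]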
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def code_id_map_em() -> dict:
"""
    Eastmoney - stock code to market id mapping
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: mapping of stock code to market id
:rtype: dict
"""
url = "http://80.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:1 t:2,m:1 t:23",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df["market_id"] = 1
temp_df.columns = ["sh_code", "sh_id"]
code_id_dict = dict(zip(temp_df["sh_code"], temp_df["sh_id"]))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["sz_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["sz_id"])))
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:81 s:2048",
"fields": "f12",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return dict()
temp_df_sz = pd.DataFrame(data_json["data"]["diff"])
temp_df_sz["bj_id"] = 0
code_id_dict.update(dict(zip(temp_df_sz["f12"], temp_df_sz["bj_id"])))
return code_id_dict
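# Hedged sketch (added): how the mapping is consumed downstream - market id 1 means
# Shanghai, 0 means Shenzhen/Beijing, and "secid" strings are built as
# "<market>.<code>". The two sample codes are only examples.
def _demo_code_id_map_em():
    mapping = code_id_map_em()
    return [f"{mapping[code]}.{code}" for code in ("600000", "000001") if code in mapping]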
def stock_zh_a_hist(
symbol: str = "000001",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "20500101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes homepage - Shanghai/Shenzhen/Beijing A shares - daily quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "forward adjusted", "hfq": "back adjusted", "": "unadjusted"}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f116",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": start_date,
"end": end_date,
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["klines"]:
return pd.DataFrame()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
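# Hedged usage sketch (added; not part of the original module): one year of
# forward-adjusted daily bars for symbol 000001 plus a 20-day moving average of the
# close. The symbol and the date range are illustrative only.
def _demo_stock_zh_a_hist():
    bars = stock_zh_a_hist(symbol="000001", period="daily",
                           start_date="20210101", end_date="20211231", adjust="qfq")
    if bars.empty:
        return bars
    bars["MA20"] = bars["收盘"].rolling(20).mean()
    return bars.tail()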
def stock_zh_a_hist_min_em(
symbol: str = "000001",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
period: str = "5",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes homepage - Shanghai/Shenzhen/Beijing A shares - intraday quotes
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :return: intraday quotes
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "https://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"ndays": "5",
"iscr": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "7eea3edcaed734bea9cbfc24409ed989",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"{code_id_dict[symbol]}.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_zh_a_hist_pre_min_em(
symbol: str = "000001",
start_time: str = "09:00:00",
end_time: str = "15:50:00",
) -> pd.DataFrame:
"""
    Eastmoney - quotes homepage - Shanghai/Shenzhen/Beijing A shares - intraday quotes including pre-market data
    http://quote.eastmoney.com/concept/sh603777.html?from=classic
    :param symbol: stock code
    :type symbol: str
    :param start_time: start time
    :type start_time: str
    :param end_time: end time
    :type end_time: str
    :return: intraday quotes including pre-market data
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "https://push2.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"ndays": "1",
"iscr": "1",
"iscca": "0",
"secid": f"{code_id_dict[symbol]}.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
date_format = temp_df.index[0].date().isoformat()
temp_df = temp_df[
date_format + " " + start_time : date_format + " " + end_time
]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
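# Hedged usage sketch (added): average traded volume in the half hour before the
# 09:30 open, using the pre-market minute bars returned above. The symbol is an
# example only.
def _demo_stock_zh_a_hist_pre_min_em():
    pre = stock_zh_a_hist_pre_min_em(symbol="000001",
                                     start_time="09:00:00", end_time="09:30:00")
    return pre["成交量"].mean()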
def stock_hk_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Hong Kong stocks - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hk_stocks
    :return: Hong Kong stocks real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:128 t:3,m:128 t:4,m:128 t:1,m:128 t:2",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"今开",
"最高",
"最低",
"昨收",
"成交量",
"成交额",
]
]
temp_df["序号"] = pd.to_numeric(temp_df["序号"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
return temp_df
def stock_hk_hist(
symbol: str = "40224",
period: str = "daily",
start_date: str = "19700101",
end_date: str = "22220101",
adjust: str = "",
) -> pd.DataFrame:
"""
    Eastmoney - quotes - Hong Kong stocks - daily quotes
    http://quote.eastmoney.com/hk/08367.html
    :param symbol: Hong Kong stock code
    :type symbol: str
    :param period: choice of {'daily', 'weekly', 'monthly'}
    :type period: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :param adjust: choice of {"qfq": "1", "hfq": "2", "": "unadjusted"}
    :type adjust: str
    :return: daily quotes
:rtype: pandas.DataFrame
"""
adjust_dict = {"qfq": "1", "hfq": "2", "": "0"}
period_dict = {"daily": "101", "weekly": "102", "monthly": "103"}
url = "http://33.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"116.{symbol}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": period_dict[period],
"fqt": adjust_dict[adjust],
"end": "20500000",
"lmt": "1000000",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
if temp_df.empty:
return pd.DataFrame()
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["日期"])
temp_df = temp_df[start_date:end_date]
if temp_df.empty:
return pd.DataFrame()
temp_df.reset_index(inplace=True, drop=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
def stock_hk_hist_min_em(
symbol: str = "01611",
period: str = "1",
adjust: str = "",
start_date: str = "1979-09-01 09:32:00",
end_date: str = "2222-01-01 09:32:00",
) -> pd.DataFrame:
"""
    Eastmoney - quotes - Hong Kong stocks - intraday quotes
    http://quote.eastmoney.com/hk/00948.html
    :param symbol: stock code
    :type symbol: str
    :param period: choice of {'1', '5', '15', '30', '60'}
    :type period: str
    :param adjust: choice of {'', 'qfq', 'hfq'}
    :type adjust: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: intraday quotes
:rtype: pandas.DataFrame
"""
adjust_map = {
"": "0",
"qfq": "1",
"hfq": "2",
}
if period == "1":
url = "http://push2his.eastmoney.com/api/qt/stock/trends2/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"iscr": "0",
"ndays": "5",
"secid": f"116.{symbol}",
"_": "1623766962675",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["trends"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"最新价",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
return temp_df
else:
url = "http://push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"klt": period,
"fqt": adjust_map[adjust],
"secid": f"116.{symbol}",
"beg": "0",
"end": "20500000",
"_": "1630930917857",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(
[item.split(",") for item in data_json["data"]["klines"]]
)
temp_df.columns = [
"时间",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df.index = pd.to_datetime(temp_df["时间"])
temp_df = temp_df[start_date:end_date]
temp_df.reset_index(drop=True, inplace=True)
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["时间"] = pd.to_datetime(temp_df["时间"]).astype(str)
temp_df = temp_df[
[
"时间",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
return temp_df
def stock_us_spot_em() -> pd.DataFrame:
"""
    Eastmoney - US stocks - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#us_stocks
    :return: US stocks real-time quotes; delayed by 15 min
:rtype: pandas.DataFrame
"""
url = "http://72.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "20000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:105,m:106,m:107",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152",
"_": "1624010056945",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"_",
"_",
"_",
"简称",
"编码",
"名称",
"最高价",
"最低价",
"开盘价",
"昨收价",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"市盈率",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df["代码"] = temp_df["编码"].astype(str) + "." + temp_df["简称"]
temp_df = temp_df[
[
"序号",
"名称",
"最新价",
"涨跌额",
"涨跌幅",
"开盘价",
"最高价",
"最低价",
"昨收价",
"总市值",
"市盈率",
"成交量",
"成交额",
"振幅",
"换手率",
"代码",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["开盘价"] = pd.to_numeric(temp_df["开盘价"], errors="coerce")
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"], errors="coerce")
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"], errors="coerce")
temp_df["昨收价"] = pd.to_numeric(temp_df["昨收价"], errors="coerce")
temp_df["总市值"] = pd.to_ | numeric(temp_df["总市值"], errors="coerce") | pandas.to_numeric |
import pyspark
from pyspark.sql import SQLContext
import pandas as pd
import csv
import os
def load_states():
# read US states
    states = set()
    with open('states.txt', 'r') as f:
        for line in f:
            state = line.strip('\n')
            if state != '':
                states.add(state)
return states
def validate2(states, bt):
#sqlContext = SQLContext(sc)
for state in states:
if not os.path.exists("US/" + state):
continue
"""
Train
"""
train_prefix = "US/" + state + '/' + bt + "/train/" + state + "_train_"
business_train_fname = train_prefix + 'yelp_academic_dataset_business.csv'
business_train_fname2 = train_prefix + 'yelp_academic_dataset_business2.csv'
review_train_fname = train_prefix + 'yelp_academic_dataset_review.csv'
checkins_train_fname = train_prefix + 'yelp_academic_dataset_checkin.csv'
tip_train_fname = train_prefix + 'yelp_academic_dataset_tip.csv'
user_train_fname = train_prefix + 'yelp_academic_dataset_user.csv'
df_business_train = pd.read_csv(business_train_fname)
df_review_train = pd.read_csv(review_train_fname)
df_checkins_train = pd.read_csv(checkins_train_fname)
df_tip_train = pd.read_csv(tip_train_fname)
df_user_train = pd.read_csv(user_train_fname)
count_business_train = df_business_train.shape[0]
count_review_train = df_review_train.shape[0]
count_checkins_train = df_checkins_train.shape[0]
count_tip_train = df_tip_train.shape[0]
count_user_train = df_user_train.shape[0]
df_train_busi_review_count = df_review_train.groupby(['business_id']).agg(['count'])
dict_train_busi_review_count = df_train_busi_review_count['review_id'].apply(list).to_dict()['count']
new_pdf_train_busi_review_count = | pd.DataFrame.from_dict(dict_train_busi_review_count, orient='index') | pandas.DataFrame.from_dict |
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
import pandas as pd
import json
HDF5_STORE_FORMAT = 'fixed'
class TestData:
def __init__(self):
self.test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.test_data_json_dir = os.path.join(os.path.dirname(__file__), 'data/results-test')
self.mock_metadata_filename = 'sample_metadata.json'
self.mock_metadata_filepath = os.path.join(self.test_data_dir, self.mock_metadata_filename)
self.json_dict_metadata = DataFrameFileIO.import_data_from_json(self.mock_metadata_filepath)
self.rule_scans = self.get_scans_from_folder("rule")
self.lic_scans = self.get_scans_from_folder("lic")
def get_scans_from_folder(self, folder_name):
data_path = os.path.join(self.test_data_json_dir, folder_name)
files_all = []
for (dirpath, dirnames, filenames) in os.walk(data_path):
filenames.sort()
files_all.extend(filenames)
json_dict_metadata = DataFrameFileIO.import_data_from_json(self.mock_metadata_filepath)
mock_path = pd.Series(["mock/data/-/multiple-packages/random/1.0.0/tool/scancode/3.2.2.json"])
packages_all = []
for file in files_all:
json_filepath = os.path.join(data_path, file)
json_dict_content = DataFrameFileIO.import_data_from_json(json_filepath)
json_dict = pd.Series([{"_metadata": json_dict_metadata, "content": json_dict_content}])
json_df = pd.DataFrame({"path": mock_path, "json_content": json_dict})
packages_all.append(json_df)
pkg_dataframe = pd.concat(packages_all)
return pkg_dataframe
class DataFrameFileIO:
def __init__(self):
self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.metadata_filename = 'projects_metadata.h5'
self.mock_metadata_filename = 'sample_metadata.json'
self.hdf_dir = os.path.join(os.path.dirname(__file__), 'data/hdf5/')
self.json_input_dir = os.path.join(os.path.dirname(__file__), 'data/json-scan-results/')
self.from_scancode_dir = os.path.join(os.path.dirname(__file__), 'data/from-scancode/')
self.path_scancode_folders_json = os.path.join(os.path.dirname(__file__), 'data/rule_lic_folder_paths.json')
@staticmethod
def import_data_from_json(file_path):
"""
        Load and parse a JSON file from the given path.
        :returns json_dict: dict with the parsed JSON content
"""
with open(file_path) as f:
json_dict = json.load(f)
return json_dict
def load_folder_names(self):
files = self.import_data_from_json(self.path_scancode_folders_json)
lic_folder_path = os.path.join(files["lic_folder"])
rule_folder_path = os.path.join(files["rule_folder"])
return lic_folder_path, rule_folder_path
@staticmethod
def get_hdf5_file_path(hdf_dir, filename):
"""
Gets filepath.
:param hdf_dir : string
:param filename : string
        :returns filepath : str
"""
file_path = os.path.join(hdf_dir, filename)
return file_path
# ToDo: Support Selective Query/Search
@staticmethod
def load_dataframe_from_hdf5(file_path, df_key):
"""
Loads data from the hdf5 to a Pandas Dataframe.
:param file_path : string
:param df_key : string
:returns filepath : pd.DataFrame object containing the Data read from the hdf5 file.
"""
dataframe = | pd.read_hdf(path_or_buf=file_path, key=df_key) | pandas.read_hdf |
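# Hedged sketch (added for illustration; the method above is cut off here and the
# file/key names below are made up): round-trip a small frame through the
# fixed-format HDF5 convention used by this module. Requires the PyTables backend.
def _demo_hdf5_roundtrip(tmp_dir):
    path = DataFrameFileIO.get_hdf5_file_path(tmp_dir, 'example.h5')
    frame = pd.DataFrame({'license': ['mit', 'apache-2.0'], 'score': [100, 98]})
    frame.to_hdf(path, key='scans', format=HDF5_STORE_FORMAT)
    return pd.read_hdf(path, key='scans')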
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import dask.dataframe as dd
from dask_sql.utils import ParsingException
def test_select(c, df):
result_df = c.sql("SELECT * FROM df")
result_df = result_df.compute()
assert_frame_equal(result_df, df)
def test_select_alias(c, df):
result_df = c.sql("SELECT a as b, b as a FROM df")
result_df = result_df.compute()
expected_df = pd.DataFrame(index=df.index)
expected_df["b"] = df.a
expected_df["a"] = df.b
assert_frame_equal(result_df[["a", "b"]], expected_df[["a", "b"]])
def test_select_column(c, df):
result_df = c.sql("SELECT a FROM df")
result_df = result_df.compute()
assert_frame_equal(result_df, df[["a"]])
def test_select_different_types(c):
expected_df = pd.DataFrame(
{
"date": pd.to_datetime(["2022-01-21 17:34", "2022-01-21", "17:34", pd.NaT]),
"string": ["this is a test", "another test", "äölüć", ""],
"integer": [1, 2, -4, 5],
"float": [-1.1, np.NaN, pd.NA, np.sqrt(2)],
}
)
c.create_table("df", expected_df)
df = c.sql(
"""
SELECT *
FROM df
"""
)
df = df.compute()
assert_frame_equal(df, expected_df)
def test_select_expr(c, df):
result_df = c.sql("SELECT a + 1 AS a, b AS bla, a - 1 FROM df")
result_df = result_df.compute()
expected_df = pd.DataFrame(
{"a": df["a"] + 1, "bla": df["b"], '"df"."a" - 1': df["a"] - 1,}
)
assert_frame_equal(result_df, expected_df)
def test_select_of_select(c, df):
result_df = c.sql(
"""
SELECT 2*c AS e, d - 1 AS f
FROM
(
SELECT a - 1 AS c, 2*b AS d
FROM df
) AS "inner"
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame({"e": 2 * (df["a"] - 1), "f": 2 * df["b"] - 1})
assert_frame_equal(result_df, expected_df)
def test_select_of_select_with_casing(c, df):
result_df = c.sql(
"""
SELECT AAA, aaa, aAa
FROM
(
SELECT a - 1 AS aAa, 2*b AS aaa, a + b AS AAA
FROM df
) AS "inner"
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame(
{"AAA": df["a"] + df["b"], "aaa": 2 * df["b"], "aAa": df["a"] - 1}
)
| assert_frame_equal(result_df, expected_df) | pandas.testing.assert_frame_equal |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue( | tm.equalContents(union, everything) | pandas.util.testing.equalContents |
# clean SG weather data
import os.path
import sys
import pandas as pd
import logging
INPUT_DIR = '../../Data/raw/weather_SG'
OUTPUT_DIR = '../../Data/interim/weather_SG'
OUTPUT_FILE = "weekly-weather.csv"
DICT_RENAME={'Station':'location',
'Year':'year', 'Month':'month', 'Day':'day',
'Daily Rainfall Total (mm)':'Rainfall Total',
'Highest 30 Min Rainfall (mm)':'Max 30Min Rainfall',
'Highest 60 Min Rainfall (mm)':'Max 60Min Rainfall',
'Highest 120 Min Rainfall (mm)':'Max 120Min Rainfall',
'Mean Temperature (°C)':'Mean Temperature',
'Maximum Temperature (°C)':'Max Temperature',
'Minimum Temperature (°C)':'Min Temperature',
'Mean Wind Speed (km/h)':'Mean Wind Speed',
'Max Wind Speed (km/h)':'Max Wind Speed'}
COLS_RENAMED = ['location', 'year', 'week', 'month', 'day', 'Rainfall Total',
'Max 30Min Rainfall', 'Max 60Min Rainfall',
'Max 120Min Rainfall', 'Mean Temperature',
'Max Temperature', 'Min Temperature',
'Mean Wind Speed', 'Max Wind Speed']
COL_NUM = ['year', 'month', 'day', 'Rainfall Total',
'Max 30Min Rainfall', 'Max 60Min Rainfall',
'Max 120Min Rainfall', 'Mean Temperature',
'Max Temperature', 'Min Temperature',
'Mean Wind Speed', 'Max Wind Speed']
APPLY_LOGIC = {
'Rainfall Total' : 'sum',
'Max 30Min Rainfall' : 'max',
'Max 60Min Rainfall' : 'max',
'Max 120Min Rainfall' : 'max',
'Mean Temperature' : 'mean',
'Max Temperature' : 'max',
'Min Temperature' : 'min',
'Mean Wind Speed' : 'mean',
'Max Wind Speed' : 'max'
}
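# A minimal sketch of the assumed weekly aggregation using APPLY_LOGIC
# (hypothetical variable/column names, not part of the original script):
#   weekly = df.groupby(['location', 'year', 'week'], as_index=False).agg(APPLY_LOGIC)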
logger = logging.getLogger(__name__)
def clean():
files = os.listdir(INPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)
stations = [x.split('_')[0] for x in files]
stations = list(set(stations))
for station in stations:
station_files = [x for x in files if x.startswith(station)]
station_files.sort()
dfWeeklyWeather = pd.DataFrame(columns=COLS_RENAMED)
dfRemain = | pd.DataFrame(columns=COLS_RENAMED) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# author:zhengk
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
# Data analysis
def pandas_analysis():
    # Read the comments
df = pd.read_csv('comment.csv', sep=';', header=None)
    # Tidy up the data
df.columns = ['date', 'comment']
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
import sys
sys.path.append("../")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.linalg as ln
from openpyxl import Workbook
import xlsxwriter as xlsx
import pickle
############## Read data and convert to dictionary ###############################################
data_list=['conference','hospital','primary_school','workplace','high_school']
for data in data_list:
original_df=pd.read_csv('../data/'+data+'.txt', sep='\t', header=None, names=['ID1','ID2','start_time','end_time'])
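    # Reading the same edge list again with ID1/ID2 swapped (next line) yields the
    # reversed edges, so original_df plus reverse_df form a symmetric contact list.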
reverse_df= | pd.read_csv('../data/'+data+'.txt', sep='\t', header=None, names=['ID2','ID1','start_time','end_time']) | pandas.read_csv |
import pandas as pd
import numpy as np
import scipy
import os, sys, time, json, math
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from os.path import join
from datetime import datetime
from scipy.integrate import odeint
from numpy import loadtxt
from scipy.optimize import minimize
rootDir = os.path.abspath(os.path.curdir)
print(rootDir)
sys.path.insert(0, os.path.join(rootDir, 'lib'))
## use JD's optimizer
#from systemSolver import optimizer as optimizer
from optimizer import Differential_Evolution
from getPatientData import getPatientData
import copy
from matplotlib.font_manager import FontProperties
#riskConfig = json.load(open('amgen-risk-model/amgen-risk-model/config/riskModel_3y_LipidCxoOptimized.json'))
# classConfig = json.load(open(riskConfig['patientClassConfig']))
classConfig = json.load(open('../config/lipidoptimizing.json'))
def differentialequations(I, t, p):
'''
This function has the differential equations of the lipids (LDL,
Total Cholesterol, Triglyceride, HDL)
Inputs:
I: Initial conditions
t: timepoints
p: parameters
'''
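    # Model implemented below: for each lipid X with baseline synthesis Sx0 and
    # clearance rate dx, statin inhibition is a sum of Emax terms over statins,
    #   dC_X/dt = Sx0 * (1 - sum_s Imax_s*D_s^n / (Ic50_s + D_s^n)) - dx * C_X,
    # where D_s = dose_s * adherence_s; HDL uses (1 + ...) since statins raise HDL.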
try:
# Initial conditions
Cldl, Cchol, Ctrig, Chdl = I
# Parameters
adherence, dose, Imaxldl, Imaxchol, Imaxtrig, Imaxhdl, Ic50, n, dx, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl = p
t = np.round(t)
t = t.astype(int)
# print dose.shape
if t > (dose.shape[0] - 1):
t = (dose.shape[0] - 1)
div = (Ic50+(dose[t]*adherence[t])**n)
h0 = ((dose[t] * adherence[t])**n)
# llipid equation
dCldldt = (Sx0ldl * (1 - np.sum((Imaxldl*h0)/div))) - (dx*Cldl)
dCcholdt = (Sx0chol * (1 - np.sum((Imaxchol*h0)/div))) - (dx*Cchol)
dCtrigdt = (Sx0trig * (1 - np.sum((Imaxtrig*h0)/div))) - (dx*Ctrig)
dChdldt = (Sx0hdl * (1 + np.sum((Imaxhdl*h0)/div))) - (dx*Chdl)
f = [dCldldt, dCcholdt, dCtrigdt, dChdldt]
return f
except Exception as e:
# print 'There was some problem with the differentialequations function: {}'.format(e)
print(dose.shape, t)
raise
def differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose):
'''
This function solves the differential equations with odeint
Inputs:
adherence: patient's adherence for all the statins, 2-d numpy array
t: timepoints
Sx0: synthesis terms for all the lipids
C0: baseline values for all the lipids
dose: doses for all the statins, 2-d numpy array
'''
try:
dx = math.log(2)/14
ldl_eff = np.load('../data/final/Efficacy/ldl_efficacy.npy')
chol_eff = np.load('../data/final/Efficacy/tc_efficacy.npy')
trig_eff = np.load('../data/final/Efficacy/trig_efficacy.npy')
hdl_eff = np.load('../data/final/Efficacy/hdl_efficacy.npy')
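        # Assumed layout of the efficacy arrays: row 0 holds the per-statin Imax
        # values; row 1 of the LDL array is reused as a shared IC50 for all lipids.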
Imaxldl = ldl_eff[0]
Imaxchol = chol_eff[0]
Imaxtrig = trig_eff[0]
Imaxhdl = hdl_eff[0]
# Imaxldl, Imaxchol, Imaxtrig, Imaxhdl = np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0])
Ic50 = ldl_eff[1]
n = 0.7
I0 = [Cldl0, Cchol0, Ctrig0, Chdl0]
p = [adherence, dose, Imaxldl, Imaxchol, Imaxtrig, Imaxhdl, Ic50, n, dx, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl]
sol = odeint(differentialequations, I0, t, args = (p,))
# print(sol)
Cldl = []
Cchol = []
Ctrig = []
Chdl = []
for s1 in sol:
Cldl.append(s1[0])
Cchol.append(s1[1])
Ctrig.append(s1[2])
Chdl.append(s1[3])
# print(Cldl)
return Cldl, Cchol, Ctrig, Chdl
except Exception as e:
# print('There was some problem with the differential_solve function: {}'.format(e))
raise
def adherence_coding(adherence, periods):
''' This function takes the adherence and identifies where it is -1 and returns the pairs
of rows and columns, number of windows and the flag
Parameters
----------
    adherence : {2-d numpy array for each patient}
        It has the adherence values for all the medications for each day
    periods : {1-d numpy array}
        It has the guessing-window label for each day; nonzero entries mark the
        days whose adherence value has to be estimated
    Returns
    -------
    pairs : list of [row, col] positions where adherence == -1
    windows : int
        Number of distinct guessing windows
    period_nonzero : 1-d numpy array
        The nonzero window labels, used to map each pair to its window's guess
'''
try:
# print(periods_total)
period_nonzero = periods[periods!=0]
row, col = np.where(adherence==-1)
pairs = list(map(list, zip(row, col)))
windows = len(np.where(np.roll(period_nonzero,1)!=period_nonzero)[0])
if windows == 0:
windows = 1
else:
windows = windows
return pairs, windows, period_nonzero
except Exception as e:
print('There was some problem with the adherence_coding function: {}'.format(e))
def adherence_guess(adherence, pairs, values, flag):
try:
for i in range(len(flag)):
l = pairs[i]
adherence[l[0]][l[1]] = values[flag[i]-1]
return adherence
except Exception as e:
# print 'There was some problem with the adherence_guess function: {}'.format(e)
raise
def h0_cal(dose, Imax, Ic50, n, adherence):
try:
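        # Hill-type (Emax) inhibition per statin: h0 = Imax * D^n / (IC50 + D^n),
        # with D = dose * adherence; an all-NaN result (no dose given) is zeroed out.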
h0 = (Imax*((dose*adherence)**n))/(Ic50 + ((dose*adherence)**n))
if all(np.isnan(h0)):
h0[:] = 0
h0_dictionary = {'Atorvastatin':h0[0], 'Fluvastatin':h0[1], 'Lovastatin':h0[2],
'Pravastatin':h0[3], 'Rosuvastatin':h0[4], 'Simvastatin':h0[5]}
# print(h0_dictionary)
return h0_dictionary
except Exception as e:
print('There was some problem with the h0_cal function: {}'.format(e))
def rmse_function(real_data,real_time,max_value, t, ode_solution):
try:
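        # Align each observed timepoint with its index on the ODE time grid `t`, then
        # return the (1/max_value)**2-weighted mean squared error at those points.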
real_time = np.array(real_time)
weight = (1/max_value)**2
indices = []
for j in real_time:
k = np.where(t == j)[0][0]
# print(k)
indices.append(k)
ode_final_values = np.array(ode_solution)[indices]
# print(indices)
# quit()
# print(ode_final_values)
rmse = np.average(weight*((ode_final_values - np.array(real_data))**2))
return rmse
except Exception as e:
print('There was some problem with the rmse_function function: {}'.format(e))
def get_total_rmse_nonNorm(adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t):
try:
ldl_max = max(ldl)
tc_max = max(tc)
# if len(trig)>0:
# trig_max = max(trig)
# else:
# trig_max = 1
trig_max = 1 # max(trig)
hdl_max = 1 # max(hdl)
Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose)
rmse_ldl = rmse_function(ldl, t_ldl, 1, t, Cldl)
rmse_tc = rmse_function(tc, t_tc, 1, t, Cchol)
# rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig)
rmse_trig = 0
rmse_hdl = 0 #rmse_function(hdl, t_hdl, 1, t, Chdl)
rmse_total = rmse_ldl + rmse_tc + (rmse_trig * 0) + rmse_hdl
return rmse_total
except Exception as e:
# print 'There was some problem with the get_total_rmse function: {}'.format(e)
raise
def get_total_rmse(x, pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t,count, biomarker,pre_adherence,prestatin, statintype, statin_dose):
try:
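        # The optimisation vector x packs, in order: one adherence guess per window,
        # then the optional baseline / pre_adherence / alpha guesses named in `biomarker`.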
values_adherence = x[0:windows]
if count > 0:
values_biomarker = x[windows:]
for i in range(count):
if biomarker[i] == 'ldl':
Cldl0 = values_biomarker[i]
if biomarker[i] == 'chol':
Cchol0 = values_biomarker[i]
if biomarker[i] == 'trig':
Ctrig0 = values_biomarker[i]
if biomarker[i] == 'hdl':
Chdl0 = values_biomarker[i]
if biomarker[i] == 'pre_adherence':
pre_adherence = values_biomarker[i]
if biomarker[i] == 'alpha':
alpha = values_biomarker[i]
if 'alpha' in biomarker:
Cldl0 = Cldl0 * alpha
Cchol0 = Cchol0 * alpha
Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence)
adherence = adherence_guess(adherence, pairs, values_adherence, period_nonzero)
ldl_max = max(ldl)
tc_max = max(tc)
# if len(trig)>0:
# trig_max = max(trig)
# else:
# trig_max = 1
trig_max = 1 #max(trig)
hdl_max = 1 #max(hdl)
Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose)
rmse_ldl = rmse_function(ldl, t_ldl, ldl_max, t, Cldl)
rmse_tc = rmse_function(tc, t_tc, tc_max, t, Cchol)
# rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig)
rmse_trig = 0
rmse_hdl = 0 #rmse_function(hdl, t_hdl, hdl_max, t, Chdl)
rmse_total = (1.2 * rmse_ldl) + rmse_tc + (rmse_trig * 0) +rmse_hdl
return rmse_total
except Exception as e:
# print 'There was some problem with the get_total_rmse function: {}'.format(e)
raise
def synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence):
try:
ldl_eff = np.load('../data/final/Efficacy/ldl_efficacy.npy')
chol_eff = np.load('../data/final/Efficacy/tc_efficacy.npy')
trig_eff = np.load('../data/final/Efficacy/trig_efficacy.npy')
hdl_eff = np.load('../data/final/Efficacy/hdl_efficacy.npy')
n = 0.7
dx = math.log(2)/14
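        # Baseline synthesis follows from steady state (dC/dt = 0):
        # Sx0 = dx*C0/(1 - h0) if the patient was already on a statin, else dx*C0.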
if pd.isnull(Cldl0) | pd.isnull(Cchol0) | pd.isnull(Ctrig0) | pd.isnull(Chdl0):
print(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose)
Cldl0, Cchol0, Ctrig0, Chdl0 = baseline_map(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose)
if prestatin:
Sx0ldl = (dx*Cldl0)/(1-h0_cal(statin_dose, ldl_eff[0], ldl_eff[1], n, pre_adherence)[statintype])
Sx0chol = (dx*Cchol0)/(1-h0_cal(statin_dose, chol_eff[0], chol_eff[1], n, pre_adherence)[statintype])
Sx0trig = (dx*Ctrig0)/(1-h0_cal(statin_dose, trig_eff[0], trig_eff[1], n, pre_adherence)[statintype])
Sx0hdl = (dx*Chdl0)/(1-h0_cal(statin_dose, hdl_eff[0], hdl_eff[1], n, pre_adherence)[statintype])
else:
Sx0ldl = (dx*Cldl0)
Sx0chol = (dx*Cchol0)
Sx0trig = (dx*Ctrig0)
Sx0hdl = (dx*Chdl0)
# print(Cldl0, Cchol0, Ctrig0, Chdl0)
return Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0
except Exception as e:
# print 'There was some problem with the synthesis_calculation function: {}'.format(e)
raise
def baseline_map(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose):
try:
ldl = {'Atorvastatin': {'5': 0.31, '10': 0.37, '15': 0.40, '20': 0.43, '30': 0.46,'40': 0.49, '45': 0.50, '50': 0.51, '60': 0.52, '70': np.nan, '80': 0.55},
'Fluvastatin': {'5': 0.10, '10': 0.15, '15': np.nan, '20': 0.21, '30': np.nan, '40': 0.27, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.33},
'Lovastatin': {'5': np.nan, '10': 0.21 , '15': np.nan, '20': 0.29, '30': 0.33, '40': 0.37, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.45},
'Pravastatin': {'5': 0.15, '10': 0.2, '15': np.nan, '20': 0.24, '30': 0.27, '40': 0.29, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.33},
'Rosuvastatin': {'5': 0.38, '10': 0.43, '15': 0.46, '20': 0.48, '30': 0.51, '40': 0.53, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.58},
'Simvastatin': {'5': 0.23, '10': 0.27, '15': 0.3, '20': 0.32, '30': 0.35, '40': 0.37, '45': 0.38, '50': 0.38, '60': 0.4, '70': 0.41, '80': 0.42}}
tc = {'Atorvastatin': {'5': 0.24, '10': 0.29, '15': 0.31, '20': 0.33, '30': 0.36, '40': 0.38, '45': 0.39, '50': 0.39, '60': 0.4, '70': np.nan, '80': 0.43},
'Fluvastatin': {'5': 0.07, '10': 0.12, '15': np.nan, '20': 0.17, '30': np.nan, '40': 0.21, '45': np.nan, '50': np.nan, '60': np.nan, '70':np.nan, '80': 0.26},
'Lovastatin': {'5': np.nan, '10': 0.17, '15': np.nan, '20': 0.23, '30': 0.26, '40': 0.29, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.35},
'Pravastatin': {'5': 0.12, '10': 0.15, '15': np.nan, '20': 0.19, '30': 0.21, '40': 0.22, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.26},
'Rosuvastatin': {'5': 0.3, '10': 0.34, '15': 0.36, '20': 0.38, '30': 0.39, '40': 0.41, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.45},
'Simvastatin': {'5': 0.17, '10': 0.21, '15': 0.23, '20': 0.25, '30': 0.27, '40': 0.29, '45': np.nan, '50': 0.3, '60': 0.31, '70': 0.32, '80': 0.33}}
trig = {'Atorvastatin': {'5': 0.16, '10': 0.19, '15': 0.2, '20': 0.21, '30': 0.23, '40': 0.25, '45': 0.25, '50': 0.25, '60': 0.26, '70': np.nan, '80': 0.27},
'Fluvastatin': {'5': 0.05, '10': 0.08, '15': np.nan, '20': 0.11, '30': np.nan, '40': 0.14, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.16},
'Lovastatin': {'5': np.nan, '10': 0.11, '15': np.nan, '20': 0.15, '30': 0.16, '40': 0.18, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.22},
'Pravastatin': {'5': 0.08, '10': 0.10, '15': np.nan, '20': 0.12, '30': 0.13, '40': 0.14, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.17},
'Rosuvastatin': {'5': 0.19, '10': 0.22, '15': 0.23, '20': 0.24, '30': 0.25, '40': 0.27, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.29},
'Simvastatin': {'5': 0.11, '10': 0.14, '15': 0.15, '20': 0.16, '30': 0.17, '40': 0.18, '45': np.nan, '50': 0.19, '60': 0.20, '70': 0.20, '80': 0.21}}
hdl = {'Atorvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70':1.0, '80': 1.0},
'Fluvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Lovastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Pravastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Rosuvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0},
'Simvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}}
Cldl_prestatin = 4.78407034
Cchol_prestatin = 6.77527799
Ctrig_prestatin = 4.65168793
Chdl_prestatin = 1.81018878
if prestatin == False:
if | pd.isnull(Cldl0) | pandas.isnull |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = | DataFrame(np.r_[df.values, df2.values], index=exp_index) | pandas.DataFrame |
from matplotlib.dates import date2num, num2date
from matplotlib.colors import ListedColormap
from matplotlib import dates as mdates
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
from matplotlib import ticker
from global_config import config
import matplotlib.pyplot as plt
import scipy.io as sio
import pandas as pd
import numpy as np
import os
from global_config import config
from functions.adjust_cases_functions import prepare_cases
from functions.plot_utils import plot_fit
from global_config import config
from models.seird_model import SEIRModel
from models.seird_model import SEIRD
from datetime import date, timedelta
import pandas as pd
import numpy as np
import datetime
import os
import sys
if len(sys.argv) < 3:
raise NotImplementedError()
else:
poly_run = int(sys.argv[1])
name_dir = str(sys.argv[2])
data_dir = config.get_property('data_dir_covid')
geo_dir = config.get_property('geo_dir')
data_dir_mnps = config.get_property('data_dir_col')
results_dir = config.get_property('results_dir')
agglomerated_folder = os.path.join(data_dir, 'data_stages', 'colombia', 'agglomerated', 'geometry' )
data = pd.read_csv(os.path.join(agglomerated_folder, 'cases.csv'), parse_dates=['date_time'],
dayfirst=True).set_index('poly_id').loc[poly_run].set_index('date_time')
data = data.resample('D').sum().fillna(0)[['num_cases','num_diseased']]
data = prepare_cases(data, col='num_cases', cutoff=0) # .rename({'smoothed_num_cases':'num_cases'})
data = prepare_cases(data, col='num_diseased', cutoff=0) # .rename({'smoothed_num_cases':'num_cases'})
data = data.rename(columns={'num_cases': 'confirmed', 'num_diseased':'death'})[['confirmed', 'death']]
data = prepare_cases(data, col='confirmed')
data = prepare_cases(data, col='death')
data['type'] = 'fitted'
data.iloc[-14:]['type'] = 'preliminary'
T_future = 28
path_to_checkpoints = os.path.join(results_dir, name_dir, 'checkpoints_agg')
import scipy.io as sio
x_post_forecast = sio.loadmat(os.path.join( path_to_checkpoints, 'forecast_xstates_bog'))['x_forecast']
para_post = sio.loadmat(os.path.join( path_to_checkpoints, '100_para_post_mean.mat'))['para_post_mean']
x_post = sio.loadmat(os.path.join( path_to_checkpoints, '100_x_post'))['x_post']
path_to_save = os.path.join(results_dir, 'weekly_forecast' , name_dir,
pd.to_datetime(data[data.type=='fitted'].index.values[-1]).strftime('%Y-%m-%d'))
pop = 8181047
parameters_csv = pd.DataFrame(np.mean(para_post[[0,1,-1],:,:].T, axis=1), columns=['beta_i','beta_a', 'ifr'], index=pd.date_range(start=pd.to_datetime(data[data.type=='fitted'].index.values[0]).strftime('%Y-%m-%d'), periods=para_post.shape[-1]))
parameters_csv.index.name = 'date'
parameters_csv['beta_a'] = parameters_csv['beta_i']*parameters_csv['beta_a']
parameters_csv.to_csv(os.path.join(path_to_save, 'parameters.csv'))
variables_csv = pd.DataFrame(np.maximum(np.mean(x_post[:7,:,:len(data)].T, axis=1),0), columns=['S','E', 'I', 'A', 'Id', 'cases','deaths'], index=pd.date_range(start=pd.to_datetime(data[data.type=='fitted'].index.values[0]).strftime('%Y-%m-%d'), periods=para_post.shape[-1]))
variables_csv.index.name = 'date'
variables_csv['R'] = pop - (variables_csv['S'] + variables_csv['E'] + variables_csv['A'] + variables_csv['I'] + variables_csv['Id'])  # +variables_csv['deaths']
variables_csv = variables_csv[['S', 'E','A', 'I', 'Id', 'deaths','R']]
variables_csv['population'] = pop
variables_csv.to_csv(os.path.join(path_to_save, 'variables.csv'))
#variables_csv = variables_csv/pop*100
#variables_csv.to_csv(os.path.join(path_to_save, 'variables_percentage.csv'))
recovered = np.squeeze(sio.loadmat(os.path.join(path_to_checkpoints, 'recovered'))['recovered_all'])
recovered = recovered[:, :len(data[data.type=='fitted'])]
def create_df_response(samples, time, date_init ='2020-03-06', forecast_horizon=27, use_future=False):
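    # Summarise posterior samples into a date-indexed dataframe with mean, median,
    # std and credible-interval bounds, optionally extended over a forecast horizon.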
dates_fitted = pd.date_range(start=pd.to_datetime(date_init), periods=time)
dates_forecast = pd.date_range(start=dates_fitted[-1]+datetime.timedelta(1), periods=forecast_horizon)
dates = list(dates_fitted)
types = ['estimate']*len(dates_fitted)
if use_future:
dates += list(dates_forecast)
types += ['forecast']*len(dates_forecast)
results_df = pd.DataFrame(samples.T)
df_response = pd.DataFrame(index=dates)
# Calculate key statistics
df_response['mean'] = results_df.mean(axis=1).values
df_response['median'] = results_df.median(axis=1).values
df_response['std'] = results_df.std(axis=1).values
df_response['low_975'] = results_df.quantile(q=0.025, axis=1).values
df_response['high_975'] = results_df.quantile(q=0.975, axis=1).values
df_response['low_90'] = results_df.quantile(q=0.1, axis=1).values
df_response['high_90'] = results_df.quantile(q=0.9, axis=1).values
df_response['low_75'] = results_df.quantile(q=0.25, axis=1).values
df_response['high_75'] = results_df.quantile(q=0.75, axis=1).values
df_response['type'] = types
df_response.index.name = 'date'
return df_response
df_response = create_df_response(np.cumsum(recovered, axis=1)/pop*10, recovered.shape[-1], date_init =pd.to_datetime(data[data.type=='fitted'].index.values[0]).strftime('%Y-%m-%d'))
df_response.to_csv(os.path.join(path_to_save, 'recovered_percentage.csv'))
fig, ax = plt.subplots(1, 1, figsize=(12.5, 7))
ax.plot(df_response.index.values, df_response["mean"], color='teal', alpha=0.4)
ax.fill_between(df_response.index.values, df_response["low_975"], df_response["high_975"], color='teal', alpha=0.6, label='95 % CI')
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(which='major', axis='y', c='k', alpha=.1, zorder=-2)
ax.grid(which='major', axis='x', c='k', alpha=.1, zorder=-2)
ax.tick_params(axis='both', labelsize=15)
ax.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f} %"))
ax.set_ylabel(r'Recovered Fraction $R(t)/N$', fontsize=15)
ax.legend(loc='upper left')
fig.savefig(os.path.join(path_to_save, 'parameters','recovered.png'), dpi=300, bbox_inches='tight', transparent=False)
plt.show()
detection_rate = para_post[4,:,:]
detection_rate_df = create_df_response(detection_rate*100, detection_rate.shape[-1], date_init =pd.to_datetime(data[data.type=='fitted'].index.values[0]).strftime('%Y-%m-%d'))
I_df = create_df_response(x_post[2,:,:], x_post.shape[-1], date_init =pd.to_datetime(data[data.type=='fitted'].index.values[0]).strftime('%Y-%m-%d'))
A_df = create_df_response(x_post[3,:,:], x_post.shape[-1], date_init =pd.to_datetime(data[data.type=='fitted'].index.values[0]).strftime('%Y-%m-%d'))
Id_df = create_df_response(x_post[4,:,:], x_post.shape[-1], date_init = | pd.to_datetime(data[data.type=='fitted'].index.values[0]) | pandas.to_datetime |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
( | pd.Series(dtype="datetime64[s]") | pandas.Series |
##### file path
### input
# data_set keys and lebels
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
### out file
### intermediate file
# data partition with diffferent label
path_df_part_1_uic_label_0 = "df_part_1_uic_label_0.csv"
path_df_part_1_uic_label_1 = "df_part_1_uic_label_1.csv"
path_df_part_2_uic_label_0 = "df_part_2_uic_label_0.csv"
path_df_part_2_uic_label_1 = "df_part_2_uic_label_1.csv"
# training set keys uic-label with k_means clusters' label
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
# scalers for data standardization store as python pickle
# for each part's features
path_df_part_1_scaler = "df_part_1_scaler"
path_df_part_2_scaler = "df_part_2_scaler"
import pandas as pd
import numpy as np
def df_read(path, mode='r'):
'''the definition of dataframe loading function
'''
path_df = open(path, mode)
try:
df = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
return df
def subsample(df, sub_size):
'''the definition of sub-sampling function
@param df: dataframe
@param sub_size: sub_sample set size
@return sub-dataframe with the same formation of df
'''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
########################################################################
'''Step 1: divide the set into positive and negative sub-sets by the u-i-c-label keys.
P.S. we first generate the u-i-c keys, then merge in the feature data and operate
chunk by chunk; this somewhat awkward flow is designed to keep PC memory usage low.
'''
df_part_1_uic_label = df_read(path_df_part_1_uic_label) # loading total keys
df_part_2_uic_label = df_read(path_df_part_2_uic_label)
df_part_1_uic_label_0 = df_part_1_uic_label[df_part_1_uic_label['label'] == 0]
df_part_1_uic_label_1 = df_part_1_uic_label[df_part_1_uic_label['label'] == 1]
df_part_2_uic_label_0 = df_part_2_uic_label[df_part_2_uic_label['label'] == 0]
df_part_2_uic_label_1 = df_part_2_uic_label[df_part_2_uic_label['label'] == 1]
df_part_1_uic_label_0.to_csv(path_df_part_1_uic_label_0, index=False)
df_part_1_uic_label_1.to_csv(path_df_part_1_uic_label_1, index=False)
df_part_2_uic_label_0.to_csv(path_df_part_2_uic_label_0, index=False)
df_part_2_uic_label_1.to_csv(path_df_part_2_uic_label_1, index=False)
#######################################################################
'''Step 2: clustering on the negative sub-set
cluster count ~ 35, using mini-batch k-means
'''
# clustering based on sklearn
from sklearn import preprocessing
from sklearn.cluster import MiniBatchKMeans
import pickle
##### part_1 #####
# loading features
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
# process by chunk, as the full set of u-i pairs is too big to hold in memory;
# this pass only fits the scaling transform on the large-scale data
scaler_1 = preprocessing.StandardScaler()
batch = 0
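# Two-pass scheme: this first chunked loop only accumulates standardisation
# statistics via StandardScaler.partial_fit; the second chunked loop below
# transforms each chunk and feeds it to MiniBatchKMeans.partial_fit.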
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=150000):
try:
# construct of part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
# getting all the complete features for clustering
train_X_1 = train_data_df_part_1.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# feature standardization
scaler_1.partial_fit(train_X_1)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
# initialise the mini-batch k-means clusterer
mbk_1 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
classes_1 = []
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=15000):
try:
        # construct part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
train_X_1 = train_data_df_part_1.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# feature standardization
standardized_train_X_1 = scaler_1.transform(train_X_1)
# fit clustering model
mbk_1.partial_fit(standardized_train_X_1)
classes_1 = np.append(classes_1, mbk_1.labels_)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print(" ------------ k-means finished on part 1 ------------.")
break
del (df_part_1_U)
del (df_part_1_I)
del (df_part_1_C)
del (df_part_1_IC)
del (df_part_1_UI)
del (df_part_1_UC)
##### part_2 #####
# loading features
df_part_2_U = df_read(path_df_part_2_U)
df_part_2_I = df_read(path_df_part_2_I)
df_part_2_C = df_read(path_df_part_2_C)
df_part_2_IC = df_read(path_df_part_2_IC)
df_part_2_UI = df_read(path_df_part_2_UI)
df_part_2_UC = df_read(path_df_part_2_UC)
# process by chunk as the set of ui-pairs is too large to fit in memory
# first pass: incrementally fit the scaling transform on the full data set
scaler_2 = preprocessing.StandardScaler()
batch = 0
for df_part_2_uic_label_0 in pd.read_csv(open(path_df_part_2_uic_label_0, 'r'), chunksize=150000):
try:
        # construct part_2's sub-training set
train_data_df_part_2 = pd.merge(df_part_2_uic_label_0, df_part_2_U, how='left', on=['user_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_I, how='left', on=['item_id'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_C, how='left', on=['item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_2 = pd.merge(train_data_df_part_2, df_part_2_UC, how='left', on=['user_id', 'item_category'])
train_X_2 = train_data_df_part_2.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# fit the scaler
scaler_2.partial_fit(train_X_2)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
# initialise the mini-batch k-means clusterer
mbk_2 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
# process by chunk as the set of ui-pairs is too large to fit in memory
batch = 0
classes_2 = []
for df_part_2_uic_label_0 in pd.read_csv(open(path_df_part_2_uic_label_0, 'r'), chunksize=15000):
try:
        # construct part_2's sub-training set
train_data_df_part_2 = | pd.merge(df_part_2_uic_label_0, df_part_2_U, how='left', on=['user_id']) | pandas.merge |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
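        # tm.box_expected (used below) wraps the values in the parametrized container
        # (Index, Series or DataFrame) so one test body covers every box type.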
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
| tm.assert_equal(result, expected) | pandas.util.testing.assert_equal |
__author__ = "<NAME>"
__copyright__ = "Sprace.org.br"
__version__ = "1.0.0"
import os
import numpy as np
import pandas as pd
#from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from enum import Enum
from pickle import dump, load
class FeatureType(Enum):
    Divided = 1,    # features are split into positions and other information
    Mixed = 2,      # all features are kept together
    Positions = 3   # only the hit positions are used
class KindNormalization(Enum):
Scaling = 1,
Zscore = 2,
Polar = 3,
Nothing = 4
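# Example construction (hypothetical file name, matching the constructor signature below):
#   ds = Dataset('tracks.csv', train_size=0.8, cylindrical=True, hits=5,
#                kind_normalization=KindNormalization.Zscore)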
class Dataset():
def __init__(self, input_path, train_size, cylindrical, hits, kind_normalization, points_3d=True):
#np.set_printoptions(suppress=True)
        # with index_col the index column would already be excluded
dataframe = pd.read_csv(input_path, header=0, engine='python')
print("[Data] Data loaded from ", input_path)
self.kind = kind_normalization
if self.kind == KindNormalization.Scaling:
self.x_scaler = MinMaxScaler(feature_range=(-1, 1))
self.y_scaler = MinMaxScaler(feature_range=(-1, 1))
elif self.kind == KindNormalization.Zscore:
            self.x_scaler = StandardScaler()  # mean and standard deviation
            self.y_scaler = StandardScaler()  # mean and standard deviation
self.y_scaler_test = StandardScaler()
'''
if normalise:
data = self.scaler.fit_transform(dataframe.values)
data = pd.DataFrame(data, columns=columns)
else:
data = pd.DataFrame(dataframe.values, columns=columns)
'''
self.start_hits = 9
self.interval = 11
self.decimals = 4
self.data = dataframe.iloc[:, self.start_hits:]
#self.self = 0
if cylindrical:
self.coord_name = 'cylin'
else:
self.coord_name = 'xyz'
self.cylindrical = cylindrical
begin_coord = 0
end_coord = 0
begin_val = 10
end_val = 11
if self.cylindrical == False:
            # if points_3d is True we keep the 3d data points (rho, eta, phi),
            # otherwise only the 2d pair (eta, phi)
if points_3d:
begin_coord = 1
else:
begin_coord = 2
end_coord = 4
        # cylindrical coordinates
elif self.cylindrical == True:
if points_3d:
begin_coord = 4
else:
begin_coord = 5
end_coord = 7
begin_cols = [begin_coord+(self.interval*hit) for hit in range(0, hits)]
end_cols = [end_coord+(self.interval*hit) for hit in range(0, hits)]
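        # e.g. with interval=11, begin_coord=1, end_coord=4 and hits=2 this gives
        # begin_cols=[1, 12] and end_cols=[4, 15]: one (begin, end) column slice per hit.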
new_df = pd.DataFrame()
for c in range(0,len(begin_cols)):
frame = self.data.iloc[:,np.r_[begin_cols[c]:end_cols[c]]]
new_df = pd.concat([new_df, frame], axis=1)
self.data = new_df
        # we need to remove some rows to avoid problems below
res = len(self.data) % 10
if res != 0:
            # this is a known bug; the easy workaround is to drop the rows left over
            # when the row count is not divisible by 10.
            print('\tRemoved %s leftover tracks (row count not divisible by 10).' % res)
self.data = self.data.iloc[:-res,:]
i_split = int(len(self.data) * train_size)
self.data_train = self.data.iloc[0:i_split,0:]
self.data_test = self.data.iloc[i_split:,0:]
print("[Data] Data set shape ", self.data.shape)
print("[Data] Data train shape ", self.data_train.shape)
print("[Data] Data test shape ", self.data_test.shape)
print("[Data] Data coordinates ", self.coord_name)
print("[Data] Data normalization type ", self.kind)
def prepare_training_data(self, feature_type, normalise=True, cylindrical=False):
if not isinstance(feature_type, FeatureType):
raise TypeError('direction must be an instance of FeatureType Enum')
self.cylindrical = cylindrical
interval = self.interval
# x, y, z coordinates
if cylindrical == False:
bp=1
ep=4
bpC=10
epC=11
        # cylindrical coordinates
elif cylindrical == True:
bp=4
ep=7
bpC=10
epC=11
df_hits_values = None
df_hits_positions = None
if feature_type==FeatureType.Divided:
# get hits positions p1(X1,Y1,Z1) p2(X2,Y2,Z2) p3(X3,Y3,Z3) p4(X4,Y4,Z4)
df_hits_positions = self.data.iloc[:, np.r_[
bp:ep,
bp+(interval*1):ep+(interval*1),
bp+(interval*2):ep+(interval*2),
bp+(interval*3):ep+(interval*3)]]
# get hits values p1(V1,V2,V3,V4)
df_hits_values = self.data.iloc[:, np.r_[
bpC:epC,
bpC+(interval*1):epC+(interval*1),
bpC+(interval*2):epC+(interval*2),
bpC+(interval*3):epC+(interval*3)]]
frames = [df_hits_positions, df_hits_values]
df_hits_positions = pd.concat(frames, axis=1)
if feature_type==FeatureType.Mixed:
df_hits_positions = self.data.iloc[:, np.r_[
bp:ep,
bpC:epC,
bp+(interval*1):ep+(interval*1), bpC+(interval*1):epC+(interval*1),
bp+(interval*2):ep+(interval*2), bpC+(interval*2):epC+(interval*2),
bp+(interval*3):ep+(interval*3), bpC+(interval*3):epC+(interval*3)]]
elif feature_type==FeatureType.Positions:
df_hits_positions = self.data.iloc[:, np.r_[
bp:ep,
bp+(interval*1):ep+(interval*1),
bp+(interval*2):ep+(interval*2),
bp+(interval*3):ep+(interval*3)]]
self.x_data = df_hits_positions
self.y_data = self.data.iloc[:, np.r_[bp+(interval*4):(bp+(interval*4)+3)]]
self.len = len(self.data)
xcolumns = self.x_data.columns
ycolumns = self.y_data.columns
        # normalization of the features only.
if normalise:
xscaled = self.x_scaler.fit_transform(self.x_data.values)
self.x_data = pd.DataFrame(xscaled, columns=xcolumns)
yscaled = self.y_scaler.fit_transform(self.y_data.values)
self.y_data = pd.DataFrame(yscaled, columns=ycolumns)
print("[Data] shape datas X: ", self.x_data.shape)
print("[Data] shape data y: ", self.y_data.shape)
print('[Data] len data total:', self.len)
#y_hit_info = self.getitem_by_hit(hit_id)
if feature_type==FeatureType.Divided:
# return x_data, y_data normalizated with data splited
return (self.x_data.iloc[:,0:12], self.x_data.iloc[:,-4:], self.y_data)
else:
# return x_data, y_data normalizated with no data splited
return (self.x_data, self.y_data)
def get_training_data(self, n_hit_in, n_hit_out, n_features, normalise=False):
'''
        n_hit_in   : number of input hits per window (e.g. 4)
        n_hit_out  : number of future hits to predict (e.g. 1)
        n_features : number of features per hit (e.g. 3)
'''
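        # Worked example (assuming the defaults described above): with n_hit_in=4,
        # n_hit_out=1 and n_features=3, a track row with 15 columns yields exactly one
        # window, X = row[0:12] (4 hits * 3 features) and Y = row[12:15] (the next hit).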
X , Y = [],[]
sequences = self.data_train.values
rows = sequences.shape[0]
cols = sequences.shape[1]
for i in range(0, rows):
end_idx = 0
out_end_idx = 0
for j in range(0, cols, n_features):
end_ix = j + n_hit_in*n_features
out_end_idx = end_ix + n_hit_out*n_features
if out_end_idx > cols+1:
#print('corta ', out_end_idx)
break
#if i < 5:
# print('[%s,%s:%s][%s,%s:%s]' % (i, j, end_ix, i, end_ix, out_end_idx))
#seq_x, seq_y = sequences.iloc[i, j:end_ix], sequences.iloc[i, end_ix:out_end_idx]
seq_x, seq_y = sequences[i, j:end_ix], sequences[i, end_ix:out_end_idx]
X.append(seq_x)
Y.append(seq_y)
x_data, y_data = 0,0
        # normalization of the features only.
if normalise:
xscaled = self.x_scaler.fit_transform(X)
x_data = pd.DataFrame(xscaled)
yscaled = self.y_scaler.fit_transform(Y)
y_data = pd.DataFrame(yscaled)
#if save_params:
# self.save_scale_param()
else:
x_data = pd.DataFrame(X)
y_data = pd.DataFrame(Y)
#return pd.DataFrame(x_data).round(self.decimals) , pd.DataFrame(y_data).round(self.decimals)
return | pd.DataFrame(x_data) | pandas.DataFrame |
import pandas as pd
from scipy import stats
import numpy as np
import math
import os
import sys
import json, csv
import itertools as it
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import scikit_posthocs
from statsmodels.sandbox.stats.multicomp import multipletests
from collections import OrderedDict
from sklearn.metrics import r2_score
from scipy.stats import distributions
from scipy.stats.stats import find_repeats
import warnings
def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
alternative="two-sided"):
"""
scipy stats function https://github.com/scipy/scipy/blob/v1.2.1/scipy/stats/morestats.py#L2709-L2806
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
Either the first set of measurements (in which case `y` is the second
set of measurements), or the differences between two sets of
measurements (in which case `y` is not to be specified.) Must be
one-dimensional.
y : array_like, optional
Either the second set of measurements (if `x` is the first set of
measurements), or not specified (if `x` is the differences between
two sets of measurements.) Must be one-dimensional.
zero_method : {'pratt', 'wilcox', 'zsplit'}, optional
The following options are available (default is 'wilcox'):
* 'pratt': Includes zero-differences in the ranking process,
but drops the ranks of the zeros, see [4]_, (more conservative).
* 'wilcox': Discards all zero-differences, the default.
* 'zsplit': Includes zero-differences in the ranking process and
split the zero rank between positive and negative ones.
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
alternative : {"two-sided", "greater", "less"}, optional
The alternative hypothesis to be tested, see Notes. Default is
"two-sided".
Returns
-------
statistic : float
If `alternative` is "two-sided", the sum of the ranks of the
differences above or below zero, whichever is smaller.
Otherwise the sum of the ranks of the differences above zero.
pvalue : float
The p-value for the test depending on `alternative`.
See Also
--------
kruskal, mannwhitneyu
Notes
-----
The test has been introduced in [4]_. Given n independent samples
(xi, yi) from a bivariate distribution (i.e. paired samples),
it computes the differences di = xi - yi. One assumption of the test
is that the differences are symmetric, see [2]_.
The two-sided test has the null hypothesis that the median of the
differences is zero against the alternative that it is different from
zero. The one-sided test has the null hypothesis that the median is
positive against the alternative that it is negative
(``alternative == 'less'``), or vice versa (``alternative == 'greater.'``).
The test uses a normal approximation to derive the p-value (if
``zero_method == 'pratt'``, the approximation is adjusted as in [5]_).
A typical rule is to require that n > 20 ([2]_, p. 383). For smaller n,
exact tables can be used to find critical values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
.. [2] <NAME>., Practical Nonparametric Statistics, 1971.
.. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
Rank Procedures, Journal of the American Statistical Association,
Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
.. [4] <NAME>., Individual Comparisons by Ranking Methods,
Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
.. [5] <NAME>., The Normal Approximation to the Signed-Rank
Sampling Distribution When Zero Differences are Present,
Journal of the American Statistical Association, Vol. 62, 1967,
pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`
Examples
--------
In [4]_, the differences in height between cross- and self-fertilized
corn plants is given as follows:
>>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]
Cross-fertilized plants appear to be be higher. To test the null
hypothesis that there is no height difference, we can apply the
two-sided test:
>>> from scipy.stats import wilcoxon
>>> w, p = wilcoxon(d)
>>> w, p
(24.0, 0.04088813291185591)
Hence, we would reject the null hypothesis at a confidence level of 5%,
concluding that there is a difference in height between the groups.
To confirm that the median of the differences can be assumed to be
positive, we use:
>>> w, p = wilcoxon(d, alternative='greater')
>>> w, p
(96.0, 0.020444066455927955)
This shows that the null hypothesis that the median is negative can be
rejected at a confidence level of 5% in favor of the alternative that
the median is greater than zero. The p-value based on the approximation
is within the range of 0.019 and 0.054 given in [2]_.
Note that the statistic changed to 96 in the one-sided case (the sum
of ranks of positive differences) whereas it is 24 in the two-sided
case (the minimum of sum of ranks above and below zero).
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if alternative not in ["two-sided", "less", "greater"]:
raise ValueError("Alternative must be either 'two-sided', "
"'greater' or 'less'")
if y is None:
d = np.asarray(x)
if d.ndim > 1:
raise ValueError('Sample x must be one-dimensional.')
else:
x, y = map(np.asarray, (x, y))
if x.ndim > 1 or y.ndim > 1:
raise ValueError('Samples x and y must be one-dimensional.')
if len(x) != len(y):
raise ValueError('The samples x and y must have the same length.')
d = x - y
if zero_method in ["wilcox", "pratt"]:
n_zero = np.sum(d == 0, axis=0)
if n_zero == len(d):
raise ValueError("zero_method 'wilcox' and 'pratt' do not work if "
"the x - y is zero for all elements.")
if zero_method == "wilcox":
# Keep all non-zero differences
d = np.compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
# return min for two-sided test, but r_plus for one-sided test
# the literature is not consistent here
# r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
# i.e. the sum of the ranks, so r_minus and the min can be inferred
# (If alternative='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
# [3] uses the r_plus for the one-sided test, keep min for two-sided test
# to keep backwards compatibility
if alternative == "two-sided":
T = min(r_plus, r_minus)
else:
T = r_plus
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
# normal approximation needs to be adjusted, see Cureton (1967)
mn -= n_zero * (n_zero + 1.) * 0.25
se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = np.sqrt(se / 24)
# apply continuity correction if applicable
d = 0
if correction:
if alternative == "two-sided":
d = 0.5 * np.sign(T - mn)
elif alternative == "less":
d = -0.5
else:
d = 0.5
# compute statistic and p-value using normal approximation
z = (T - mn - d) / se
if alternative == "two-sided":
prob = 2. * distributions.norm.sf(abs(z))
elif alternative == "greater":
# large T = r_plus indicates x is greater than y; i.e.
# accept alternative in that case and return small p-value (sf)
prob = distributions.norm.sf(z)
else:
prob = distributions.norm.cdf(z)
return T, prob, z
def get_effect_size_text(effect_size):
if effect_size == None:
effect_name = "unknown"
elif 0.1 <= effect_size < 0.25:
effect_name = "uphill weak"
elif 0.25 <= effect_size < 0.4:
effect_name = "uphill moderate"
elif effect_size >= 0.4:
effect_name = "uphill strong"
elif -0.1 >= effect_size > -0.25:
effect_name = "downhill weak"
elif -0.25 >= effect_size > -0.4:
effect_name = "downhill moderate"
elif effect_size <= -0.4:
effect_name = "downhill strong"
else:
effect_name = "unsure"
return effect_name
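# e.g. get_effect_size_text(0.3) -> "uphill moderate", get_effect_size_text(-0.5) -> "downhill strong"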
def get_p_value_stars(p_value):
if p_value <= 0.01:
return "***"
elif p_value <= 0.05:
return "**"
elif p_value <= 0.1:
return "*"
else:
return ""
def get_result_sent(test_name, feature_name, corpus_name, p_value, n_complex, avg_complex, sd_complex, n_simple, avg_simple, sd_simple, df, t_value, effect_size, p_threshold=0.05, only_relevant=False):
effect_name = get_effect_size_text(effect_size)
if 0 <= p_value <= p_threshold:
is_significant = "a"
p_value_text = "p<="+str(p_threshold)
else:
is_significant = "no"
p_value_text = "p>"+str(p_threshold)
if test_name == "No test" or effect_size == None:
return "The average of {} for complex sentences is {} (SD={}, n={}) and for simple sentences {} (SD={}).".format(feature_name, round(avg_complex,2), round(sd_complex, 2), n_complex, round(avg_simple, 2), round(sd_simple, 2))
if only_relevant:
if p_value > p_threshold or effect_size == None or effect_size < 0.1:
return None
return "A {} was conducted to compare {} in the {} corpus. " \
"There is {} significant ({}) difference in the scores for complex (n={}, M={}, SD={}) and " \
"simplified (n={}, M={}, SD={}) sentences, t({})={}. " \
"These results that the simplification level has a {} effect (r={}) on {}.\n".format(test_name, feature_name,
corpus_name, is_significant,
p_value_text, n_complex, round(avg_complex,2),
round(sd_complex,2), n_simple, round(avg_simple,2),
round(sd_simple,2), df, round(t_value,2),
effect_name, round(effect_size,2),
feature_name)
def get_variable_names(col_names, feat_dict_path="feature_dict_checked.json", comparable=False, paired=True, difference=False):
if comparable:
return sorted(list(set(["_".join(col.split("_")[:-1]) for col in col_names if col.endswith("_complex") or col.endswith("_simple")])))
elif paired:
return sorted([col for col in col_names if col.endswith("_paired")])
elif difference:
return sorted([col for col in col_names if col.endswith("_diff")])
else:
return sorted(list(col_names))
def add_difference_features(input_data):
comparable_names = get_variable_names(input_data.columns.values, comparable=True, paired=False)
for feat in comparable_names:
input_data[feat+"_diff"] = input_data[feat+"_complex"].astype(np.float) - input_data[feat+"_simple"].astype(np.float)
return input_data
def change_dtype(input_data, col_names, comparable=True):
if comparable:
old_names = col_names
col_names = list()
for col in old_names:
col_names.append(col+"_complex")
col_names.append(col+"_simple")
# do_statistics.py:409: DtypeWarning: Columns (54,55,56,60,61,62) have mixed types. Specify dtype option on import or set low_memory=False.
# en newsela 2015
input_data.replace(False, 0, inplace=True)
input_data.replace("False", 0, inplace=True)
input_data.replace(True, 1, inplace=True)
input_data.replace("True", 1, inplace=True)
input_data[col_names] = input_data[col_names].apply(pd.to_numeric)
return input_data
def test_distribution_null_hypothesis(complex_values, simple_values, independent, feat_name, dict_path="feature_dict_checked.json"):
complex_values = complex_values[complex_values.notnull()]
simple_values = simple_values[simple_values.notnull()]
# todo: remove if all values 0 or nan
if len(complex_values) == 0 or len(simple_values) == 0 or \
(complex_values == 0).sum() == len(complex_values) or \
(simple_values == 0).sum() == len(simple_values) or \
list(complex_values) == list(simple_values):
return ("0", 0, 0, None)
# # 0: nominal, 1: ordinal, 2: interval, 3: ratio
# scale_of_measurement = check_scale(complex_values)
scale_of_measurement = check_scale_from_dict(dict_path, "comparable", feat_name)
normal_distribution = check_distribution([complex_values, simple_values], p_threshold=0.05)
variance_homogeneity = check_variance_homogeneity([complex_values, simple_values], p_threshold=0.05)
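    # Test selection, roughly: interval/ratio scale + normal + homogeneous variances
    # -> t-test (Student's if independent or paired, Welch's if variances differ);
    # at least ordinal -> Mann-Whitney U (independent) or Wilcoxon signed-rank (paired);
    # otherwise no test, only the two means are reported.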
if scale_of_measurement >= 2 and normal_distribution and variance_homogeneity and independent:
t_value, p_value = stats.ttest_ind(complex_values, simple_values, equal_var=True)
effect_size = abs(math.sqrt(t_value ** 2 / (t_value ** 2 + min(complex_values, simple_values) - 1)))
return ("Student's t-test", t_value, p_value, effect_size)
elif scale_of_measurement >= 2 and normal_distribution and not variance_homogeneity and independent:
t_value, p_value = stats.ttest_ind(complex_values, simple_values, equal_var=False)
effect_size = abs(math.sqrt(t_value ** 2 / (t_value ** 2 + min(complex_values, simple_values) - 1)))
return ("Welch's t-test", t_value, p_value, effect_size)
elif scale_of_measurement >= 1 and independent:
t_value, p_value = stats.mannwhitneyu(complex_values, simple_values)
#effect_size = get_effect_size(t_value, min(len(complex_values), len(simple_values)))
return ("Mann–Whitney U test", t_value, p_value, None)
elif scale_of_measurement >= 2 and normal_distribution and variance_homogeneity and not independent:
t_value, p_value = stats.ttest_rel(complex_values, simple_values)
# effect_size = abs(math.sqrt(t_value**2/(t_value**2+min(complex_values, simple_values)-1)))
effect_size = stats.pearsonr(complex_values, simple_values)[0]
return ("Student's t-test", t_value, p_value, effect_size)
elif scale_of_measurement >= 1 and not independent:
if len(complex_values) != len(simple_values):
return ("No test", np.mean(complex_values), np.mean(simple_values), None)
t_value, p_value, z_value = wilcoxon(complex_values, simple_values)
effect_size = abs(z_value/math.sqrt(min(len(complex_values), len(simple_values))))
#effect_size = stats.pearsonr(complex_values, simple_values)[0]
return ("Wilcoxon signed-rank test", t_value, p_value, effect_size)
else:
# todo name only distribution of values?
return ("No test", np.mean(complex_values), np.mean(simple_values), None)
def posthoc_dunn_z(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] <NAME> (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] <NAME> (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn_z(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
#p_value = 2. * ss.norm.sf(np.abs(z_value))
return z_value
x, _val_col, _group_col = scikit_posthocs.__convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = pd.Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn_z(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return pd.DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def compare_languages(list_lang_results, feat_name, list_corpus_names, p_threshold=0.05, dict_path="feature_dict_checked.json"):
list_lang_no_nan = list()
corpus_names = OrderedDict()
for lang_values, corpus_name in zip(list_lang_results, list_corpus_names):
no_nans = lang_values[lang_values.notnull()]
if len(no_nans) > 0:
list_lang_no_nan.append(no_nans)
corpus_names[corpus_name] = len(no_nans)
if len(list_lang_no_nan) == 0:
return 0,0
# scale_of_measurement = check_scale(list_lang_no_nan[0])
scale_of_measurement = check_scale_from_dict(dict_path, "paired", feat_name)
# # 0: nominal, 1: ordinal, 2: interval, 3: ratio
normal_distribution = check_distribution(list_lang_no_nan, p_threshold=0.05)
variance_homogeneity = check_variance_homogeneity(list_lang_no_nan, p_threshold=0.05)
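    # Across corpora: one-way ANOVA if the assumptions hold, otherwise Kruskal-Wallis
    # followed by Dunn's post-hoc test (Holm-corrected) to find which corpora differ.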
if scale_of_measurement >= 2 and normal_distribution and variance_homogeneity:
        # does the language affect the value of the feature? Do the simplifications
        # for each language behave similarly?
t_value, p_value = stats.f_oneway(*list_lang_no_nan)
return ("ANOVA", p_value)
#if p_value <= p_threshold:
# posthoc: which langauges are different?
# stats.multicomp.pairwise_tukeyhsd
# if two different ones found, use pearson to get effect size
#effect_size = stats.pearsonr(complex_values, simple_values)[0]
# effec_size = cohend(complex_values, simple_values)
elif scale_of_measurement >= 1:
try:
h_statistic, p_value = stats.kruskal(*list_lang_no_nan)
except ValueError:
return 0,0
if 0 < p_value <= p_threshold:
if p_value <= 0.01:
p_value = "p<=.01"
elif p_value <= 0.05:
p_value = "p<=.05"
else:
p_value = "p>0.05"
output_list = list()
posthoc_frame = scikit_posthocs.posthoc_dunn(list_lang_no_nan, p_adjust="holm")
posthoc_frame_z = posthoc_dunn_z(list_lang_no_nan)
for i, name_corpus_col in zip(posthoc_frame.columns.values, corpus_names.keys()):
for n, name_corpus_row in zip(range(0, len(posthoc_frame)), corpus_names.keys()):
if p_threshold >= posthoc_frame.iloc[n][i] > 0:
effect_size = abs(posthoc_frame_z.iloc[n][i]/math.sqrt(corpus_names[name_corpus_col]+corpus_names[name_corpus_row]))
if effect_size >= 0.1:
output_list.append(["Kruskal ", p_value, "effectsize", str(round(effect_size, 4)),
"h", str(round(h_statistic, 4)), "z", str(round(posthoc_frame_z.iloc[n][i],4)), name_corpus_col, name_corpus_row])
#pos_col = list(corpus_names.keys()).index(name_corpus_col)
#pos_row = list(corpus_names.keys()).index(name_corpus_row)
#effect_size_pearson = stats.pearsonr(list_lang_no_nan[pos_col], list_lang_no_nan[pos_row])[0]
# print(len(list_lang_no_nan[pos_col]), len(list_lang_no_nan[pos_row]))
# effect_size_cohen = cohend(list_lang_no_nan[pos_col], list_lang_no_nan[pos_row])
return output_list
else:
return 0, 0
else:
return 0, 0
def cohend(d1, d2):
# code from here https://machinelearningmastery.com/effect-size-measures-in-python/
# calculate the size of samples
n1, n2 = len(d1), len(d2)
# calculate the variance of the samples
s1, s2 = np.var(d1, ddof=1), np.var(d2, ddof=1)
# calculate the pooled standard deviation
s = math.sqrt(((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2))
# calculate the means of the samples
u1, u2 = np.mean(d1), np.mean(d2)
# calculate the effect size
return (u1 - u2) / s
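# Sanity check: cohend([1, 2, 3], [2, 3, 4]) == -1.0 (pooled SD 1.0, mean difference -1).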
def get_descriptive_values(input_values):
input_values = input_values[input_values.notnull()]
return len(input_values), np.mean(input_values), np.std(input_values)
def get_effect_size(z_value, n):
return abs(z_value/math.sqrt(n))
def scale_value_to_text(value):
dict_scale = {0: "nominal", 1: "ordinal", 2: "interval", 3: "ratio"}
return dict_scale[value]
def check_scale(input_series):
# 0: nominal, 1: ordinal, 2: interval, 3: ratio
# enough to check one scale because both have equal values
if len(set(input_series).difference({0,1})) == 0: #input_series.all() in [0, 1]:
return 0
elif all(0 <= i <= 1 for i in input_series):
return 3
else:
return 1
# if len(values.difference({0,1})) <= 1:
# # including nan value
# return "nominal"
# else:
# return "interval"
def check_scale_from_dict(dict_path, comparable_or_paired, feat_name):
with open(dict_path) as f:
data = json.load(f)
if feat_name in data[comparable_or_paired].keys():
return data[comparable_or_paired][feat_name]["measurement_scale"]
else:
#print(feat_name, " no information in feature dict provided.")
return 1
def check_distribution(list_series, p_threshold=0.05):
normal_distribution = False
for input_series in list_series:
w, p_value = stats.shapiro(input_series)
if p_value >= p_threshold:
            # a significant Shapiro result means the data is not normally distributed,
            # so p_value must be greater than or equal to the threshold to assume normality
normal_distribution = True
else:
normal_distribution = False
return normal_distribution
def check_variance_homogeneity(list_values, p_threshold=0.05):
w, p_value = stats.levene(*list_values)
if p_value >= p_threshold:
    # a significant Levene result means the variances are heterogeneous,
    # so p_value must be greater than or equal to the threshold to assume homogeneity
return True
else:
return False
def strong_effect_bold(val):
# bold = 'bold' if not isinstance(val, str) and float(val) >= 0.5 else ''
# return 'font-weight: %s' % bold
if isinstance(val, str):
color = 'black'
elif float(val) >= 0.4:
color = "darkblue"
elif float(val) >= 0.25:
color = "darkgreen"
else:
color = "violet"
return 'color: %s' % color
def get_effect_stars(p_val, effect_size, p_threshold=0.05):
if p_val <= p_threshold:
if effect_size >= 0.4:
return "***"
elif effect_size >= 0.25:
return "**"
elif effect_size >= 0.1:
return "*"
else:
return ""
else:
return ""
def get_statistics(input_data, comparable_col_names, paired_col_names, corpus_name, output_file_text, output_file_descriptive_table, output_file_effect_table, p_threshold=0.05, key=""):
result_sents = list()
result_table = pd.DataFrame(columns=["feature", corpus_name])
columns_descr = pd.MultiIndex.from_tuples(
[("feature", ""), (corpus_name, "complex"), (corpus_name, "simple"),(corpus_name, "effect size")])
#columns_descr = pd.MultiIndex.from_tuples([("feature", ""), (corpus_name, "N"), (corpus_name, "AVG (SD) complex"), (corpus_name, "AVG (SD) simple"), ("effect_size", "")])
#[["feature", corpus_name], ["", "N", "AVG (SD) complex", "AVG (SD) simple"]])
descriptive_table = pd.DataFrame(columns=columns_descr)
columns_descr_paired = pd.MultiIndex.from_tuples([("feature", ""), (corpus_name, "N"), (corpus_name, "AVG paired"), (corpus_name, "SD paired")])
descriptive_table_paired = pd.DataFrame(columns=columns_descr_paired)
# print(input_data.describe())
# print(comparable_col_names)
for i, col in enumerate(comparable_col_names):
#if col in ["check_if_head_is_noun", "check_if_head_is_verb", "check_if_one_child_of_root_is_subject", "check_passive_voice",
# "count_characters", "count_sentences", "count_syllables_in_sentence", "get_average_length_NP",
# "get_average_length_VP", "get_avg_length_PP", "get_ratio_named_entities",
# "get_ratio_of_interjections", "get_ratio_of_particles", "get_ratio_of_symbols",
# "get_ratio_referential", "is_non_projective"]:
# continue
# print(col, corpus_name, len(input_data[input_data[col+"_complex"].notnull()]), len(input_data[input_data[col+"_simple"].notnull()]))
test_name, t_value, p_value, effect_size = test_distribution_null_hypothesis(input_data[col+"_complex"], input_data[col+"_simple"], False, col)
n_complex, avg_complex, sd_complex = get_descriptive_values(input_data[col+"_complex"])
n_simple, avg_simple, sd_simple = get_descriptive_values(input_data[col + "_simple"])
# print(col, test_name, t_value, p_value, effect_size, "complex", n_complex, avg_complex, sd_complex, "simple", n_simple, avg_simple, sd_simple)
result_sent = get_result_sent(test_name, col, corpus_name, p_value, n_complex, avg_complex, sd_complex, n_simple, avg_simple, sd_simple, min(n_complex, n_simple)-1, t_value, effect_size, p_threshold=0.05, only_relevant=True)
if result_sent:
result_sents.append(result_sent)
if effect_size == None:
effect_size = 0
if p_value > p_threshold or effect_size < 0.1:
result_table.loc[i] = [col, ""]
else:
result_table.loc[i] = [col, str(round(effect_size,2))+get_p_value_stars(p_value)]
descriptive_table.loc[i] = [col, str(round(avg_complex, 2))+"$\pm$"+str(round(sd_complex,2))+"", str(round(avg_simple, 2))+"$\pm$"+str(round(sd_simple,2))+"", get_effect_stars(p_value, effect_size, p_threshold=0.05)]
descriptive_table.loc[i+1] = ["N", "", n_complex, ""]
for n, col in enumerate(paired_col_names):
n_paired, avg_paired, sd_paired = get_descriptive_values(input_data[col])
# print(col, test_name, t_value, p_value, effect_size, "complex", n_complex, avg_complex, sd_complex, "simple", n_simple, avg_simple, sd_simple)
descriptive_table_paired.loc[n] = [col, n_paired,
round(avg_paired, 2), "$\pm$" + str(round(sd_paired, 2))]
if output_file_text:
with open(output_file_text, "w+") as f:
f.writelines(result_sents)
with open(output_file_effect_table, "w+") as f:
f.write(result_table.to_latex(index=False, escape=False)+"\n\n")
result_table.set_index("feature")
# result_table_excel = result_table.style.applymap(strong_effect_bold)
# result_table_excel.to_excel(corpus_name+'styled.xlsx', engine='openpyxl')
# if output_file_table:
with open(output_file_descriptive_table, "w+") as f:
f.write(descriptive_table.to_latex(index=False, escape=False))
return input_data, descriptive_table, result_table, descriptive_table_paired
def save_results(concat_descr, concat_effect, concat_descr_paired, output_descr_paired, type_value=""):
type_value_dir = ""
if type_value:
if not os.path.exists("data/results/"+type_value):
os.makedirs("data/results/"+type_value)
type_value_dir = type_value+"/"
type_value = "_"+type_value
with open("data/results/"+type_value_dir+"all_descr_results"+type_value+".txt", "w") as f:
f.write(concat_descr.to_latex(index=False, escape=False))
with open("data/results/"+type_value_dir+"all_descr_results"+type_value+".csv", "w") as f:
f.write(concat_descr.to_csv(index=False))
with open("data/results/"+type_value_dir+"all_effect_results"+type_value+".txt", "w") as f:
f.write(concat_effect.to_latex(index=False, escape=False))
with open("data/results/"+type_value_dir+"all_effect_results"+type_value+".csv", "w") as f:
f.write(concat_effect.to_csv(index=False))
with open("data/results/"+type_value_dir+"all_descr_paired_results"+type_value+".txt", "w") as f:
f.write(concat_descr_paired.to_latex(index=False, escape=False))
with open("data/results/"+type_value_dir+"all_descr_paired_results.csv", "w") as f:
f.write(concat_descr_paired.to_csv(index=False))
with open("data/results/"+type_value_dir+"all_effect_paired_results"+type_value+".txt", "w") as f:
f.write(output_descr_paired)
return 1
def get_feature_dict(result_files):
list_lang_input = list()
for input_file in result_files:
input_data = | pd.read_csv("data/ALL/"+input_file, sep="\t", header=0, warn_bad_lines=True, error_bad_lines=False) | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas.compat as compat
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
UInt64Index, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def test_pickle_compat_construction(self):
# need an object to create with
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)")
with pytest.raises(TypeError, match=msg):
self._holder()
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(self):
# GH18699
# index kwarg
idx = self.create_index()
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = self.create_index()
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_to_frame_datetime_tz(self):
# GH 25809
idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D')
idx = idx.tz_localize('UTC')
result = idx.to_frame()
expected = pd.DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = "Not supported for type {}".format(type(idx).__name__)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(' __', ' __r')
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match='cannot perform all'):
idx.all()
with pytest.raises(TypeError, match='cannot perform any'):
idx.any()
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
with pytest.raises(ValueError, match='The truth value of a'):
if idx:
pass
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match='Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_get_indexer_consistency(self):
# See GH 16819
for name, index in self.indices.items():
if isinstance(index, IntervalIndex):
continue
if index.is_unique or isinstance(index, CategoricalIndex):
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
            if isinstance(ind, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, order=('a', 'b'))
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize("method", ["intersection", "union",
"difference", "symmetric_difference"])
def test_set_ops_error_cases(self, case, method):
for name, idx in compat.iteritems(self.indices):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case)
def test_intersection_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.union(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.union(case)
assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(self, sort):
for name, idx in compat.iteritems(self.indices):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second, sort)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.difference(case, sort)
elif isinstance(idx, CategoricalIndex):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
tm.assert_numpy_array_equal(result.sort_values().asi8,
answer.sort_values().asi8)
else:
result = first.difference(case, sort)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3], sort)
def test_symmetric_difference(self):
for name, idx in compat.iteritems(self.indices):
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
pass
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.symmetric_difference(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
def test_insert_base(self):
for name, idx in compat.iteritems(self.indices):
result = idx[1:4]
if not len(idx):
continue
# test 0th element
assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(self):
for name, idx in compat.iteritems(self.indices):
if not len(idx):
continue
if isinstance(idx, RangeIndex):
# tested in class
continue
expected = idx[1:]
result = idx.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
idx.delete(len(idx))
def test_equals(self):
for name, idx in compat.iteritems(self.indices):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(idx, RangeIndex):
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
if isinstance(index_a, PeriodIndex):
pytest.skip('Skip check for PeriodIndex')
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
        series_b = Series(array_b)
# -*- coding: utf-8 -*-
"""
dopplertext is a program to convert Doppler parameters stored on DCM images of PW Doppler into a readable, usable format.
Copyright (c) 2018 <NAME>.
This file is part of dopplertext.
dopplertext is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dopplertext is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with dopplertext. If not, see <http://www.gnu.org/licenses/>.
dopplertext Created on Thu July 12 14:41:18 2018
@author: gordon
"""
import skimage.io
import skimage.color
import skimage.util
from skimage.feature import match_template
import pandas as pd
import imghdr
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import pydicom as dicom
except ImportError:
import dicom
import numpy as np
import os, glob, sys
from gooey import Gooey, GooeyParser
@Gooey(default_size=(800,600))
def main():
parser = GooeyParser(description="GE DICOM Text Parser")
parser.add_argument('outputfile', help='Select Output Spreadsheet Filename',widget="FileChooser")
area_threshold_group = parser.add_mutually_exclusive_group(required=True)
area_threshold_group.add_argument('--inputfile',
help='Input DCM File', widget='FileChooser')
area_threshold_group.add_argument('--inputdir',
help='Input DCM Directory', widget='DirChooser')
args = parser.parse_args()
runFile(args)
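# Note: the @Gooey decorator above wraps the GooeyParser-based CLI in a simple GUI,
# so running the script opens a window that asks for the output spreadsheet and for
# either a single DCM file or a directory of DCM files before calling runFile().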
def loadImgDict():
if not os.path.isfile('image_dictionary.pickle'):
raise NotImplementedError
try:
with open('image_dictionary.pickle', 'rb') as handle:
image_dictionary = pickle.load(handle)
except UnicodeDecodeError as e:
with open('image_dictionary.pickle', 'rb') as f:
image_dictionary = pickle.load(f, encoding='latin1')
return image_dictionary
def runFile(args):
"""
:param args: dictionary
:return:
"""
inputfile = args.inputfile
inputdir = args.inputdir
image_dictionary = loadImgDict()
output_df = []
final_df = pd.DataFrame()
if inputfile is not None:
print('Analysing Single File....')
inputfile = inputfile.replace('\\', '/')
output_df, y_order = getTextFromDCMFile(inputfile, image_dictionary)
output_df = pd.DataFrame(output_df)
output_df['FileName'] = [inputfile[inputfile.rfind('/') + 1:] for i in range(len(output_df))]
output_df = output_df.iloc[np.argsort(y_order)].reset_index(drop=True)
write_df = pd.DataFrame(output_df)
print('done!')
if inputdir is not None:
file_list = glob.glob(args.inputdir + '/*')
print('Analysing Each DCM File....')
        final_df = pd.DataFrame([])
"""
[Optional] When using db_out.csv, check consistency:
the ID column should be the same in raw and out.
With this I won't need the ID column at all.
"""
import os.path as p
from typing import NamedTuple, List, Tuple, Callable, Dict, Any
import ast
import pandas as pd
from pandas import DataFrame
import numpy as np
# noinspection PyPep8Naming
from numpy import ndarray as Array
from kiwi_bugfix_typechecker import test_assert
BETTER: bool = False # use good enough types or better types
test_assert()
TO_DICT: Dict[str, Dict[int, int]] = dict(
dom={
1: 1, 2: 2, 3: 3, 4: 4,
5: 5, 6: 2, 7: 6, 8: 4,
9: 5, 10: 7, 11: 6, 12: 8,
13: 1, 14: 7, 15: 3, 16: 8
},
temper={
1: 1, 2: 2, 3: 3, 4: 4,
5: 1, 6: 2, 7: 3, 8: 4,
9: 1, 10: 2, 11: 3, 12: 4,
13: 1, 14: 2, 15: 3, 16: 4
},
quadra_club={
1: 1, 2: 1, 3: 2, 4: 2,
5: 3, 6: 3, 7: 4, 8: 4,
9: 5, 10: 5, 11: 6, 12: 6,
13: 7, 14: 7, 15: 8, 16: 8
},
quadra={
1: 1, 2: 1, 3: 1, 4: 1,
5: 2, 6: 2, 7: 2, 8: 2,
9: 3, 10: 3, 11: 3, 12: 3,
13: 4, 14: 4, 15: 4, 16: 4
},
club={
1: 1, 2: 1, 3: 2, 4: 2,
5: 3, 6: 3, 7: 4, 8: 4,
9: 2, 10: 2, 11: 1, 12: 1,
13: 4, 14: 4, 15: 3, 16: 3
},
mr={
1: 1, 2: 2, 3: 1, 4: 2,
5: 1, 6: 2, 7: 1, 8: 2,
9: 1, 10: 2, 11: 1, 12: 2,
13: 1, 14: 2, 15: 1, 16: 2
},
ei={
1: 1, 2: 2, 3: 2, 4: 1,
5: 1, 6: 2, 7: 2, 8: 1,
9: 1, 10: 2, 11: 2, 12: 1,
13: 1, 14: 2, 15: 2, 16: 1
},
mr_tf={
1: 1, 2: 2, 3: 3, 4: 4,
5: 1, 6: 2, 7: 3, 8: 4,
9: 3, 10: 4, 11: 1, 12: 2,
13: 3, 14: 4, 15: 1, 16: 2
},
xir_xer={
1: 1, 2: 1, 3: 2, 4: 2,
5: 1, 6: 1, 7: 2, 8: 2,
9: 1, 10: 1, 11: 2, 12: 2,
13: 1, 14: 1, 15: 2, 16: 2
},
lc={
1: 1, 2: 1, 3: 1, 4: 1,
5: 2, 6: 2, 7: 2, 8: 2,
9: 2, 10: 2, 11: 2, 12: 2,
13: 1, 14: 1, 15: 1, 16: 1
},
)
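# TO_DICT maps each of the 16 base types onto coarser label sets (keyed 'dom', 'temper',
# 'quadra', 'club', ...). The number of distinct values per key is what other_y_dim in DB
# reports as the output dimension for that labelling.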
BOLTI = 'BOLTI_434__2017_08_18__N3197'
SOLTI = 'SOLTI_160__2016_03_20__N6406'
SOLTI_ENG = 'SOLTI_160_ENG__2019_08_07__NXXXX'
EXTRA_QUESTIONS = ['sex in (female, male)', 'age in (0-20, 21-25, 26-30, 31-40, 41-100)']
EXTRA_COLUMNS_IDS = ['sex', 'age']
SOLTI_ENG_ID_Δ = 20000
# MISSING_TYPES = (3, 4, 9, 10, 16) # (3, 4, 10, 12, 16)
MISSING_TYPES = tuple(range(1, 17))
class DB(NamedTuple):
"""
6000 is an example number of completed questionnaires.
160 is an example number of questions in the questionnaire.
Attributes:
-----------
profiles : np.ndarray
2D like (~6000, ~2 + ~160) shape
df : pd.DataFrame
2D like (~6000, ~8 + ~160) shape
types_tal : np.ndarray
1D like (~6000,) shape; in {1, ..., 16}
types_self : np.ndarray
1D like (~6000,) shape; in {-1, 1, ..., 16}
types_tal_sex : np.ndarray
1D like (~6000,) shape; in {1, ..., 16, 17, ..., 32} females first
types_smart_coincide : np.ndarray
1D like (~6000,) shape; in {-16, ..., -1, 1, ..., 16}
questions : List[Tuple[str, str]]
2D like (~160, ~2) shape. First one is original Russian question.
Second one is autotranslated English question.
interesting : np.ndarray
2D like (~10, ~2) shape. Interesting indexes (not IDs!).
First is a real type. Second is index.
"""
profiles: Array
df: DataFrame
types_tal: Array
types_self: Array
types_tal_sex: Array
questions: List[Tuple[str, str]]
interesting: Array
type_smart_coincide: Array
other_smart_coincide: Dict[str, Array]
other_y_dim: Dict[str, int]
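# Illustrative access pattern (names as defined in this module): db = read_solti_160()
# gives db.profiles with one row per respondent ([sex, age] + answers) and
# db.other_smart_coincide['quadra'] with the smart-coincidence labels for the 'quadra' grouping.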
class DBSpec:
name: str
_reader: Tuple[Callable[[], DB]]
h_dims: Tuple[int, ...]
def __init__(self, name: str, reader: Callable[[], DB], h_dims: Tuple[int, ...]):
self.name = name
self.h_dims = h_dims
self._reader = (reader,)
@property
def reader(self) -> Callable[[], DB]:
return self._reader[0]
def dump(self) -> Dict[str, Any]:
return dict(name=self.name, h_dims=self.h_dims)
def read_profiles(profile_name: str) -> Tuple[DataFrame, DataFrame, List[str], List[str], List[List[int]]]:
"""
* Questions lists are prepended with 'sex' and 'age' descriptions.
* ``sex`` is mapped: 0 to 1, 1 to 5.
    * ``self`` empty string is mapped to -1.
    * ``confidence`` empty string and ``'None'`` are mapped to ``-1``.
:param profile_name:
:return:
(raw_data_frame, out_data_frame, questions, questions_eng, interesting_ids)
"""
_dir = p.join(p.dirname(p.abspath(__file__)), profile_name)
def _questions(file_name: str):
with open(p.join(_dir, file_name), 'r', encoding='utf-8') as f_:
return EXTRA_QUESTIONS + f_.read().strip().splitlines()
df_raw = pd.read_csv(p.join(_dir, 'db_raw.csv'), converters=dict(
sex=lambda s: 5 if (int(s) == 1) else 1,
self=lambda s: int(s) if s else -1,
confidence=lambda s: int(s) if s not in ('None', '') else -1,
))
df_out = pd.read_csv(p.join(_dir, 'db_out.csv'))
if len(df_out) != len(df_raw):
raise ValueError('Inconsistent db_raw.csv and db_out.csv length.')
questions = _questions('questions.txt')
questions_eng = _questions('questions_autotranslated.txt')
columns = df_raw.columns.values.tolist()
if ('sex' not in columns) or ('age' not in columns):
raise ValueError("Either 'sex' or 'age' is not in the columns names.")
quest_n = len([name for name in columns if name.isdigit()]) + len(EXTRA_QUESTIONS)
if (quest_n != len(questions)) or (quest_n != len(questions_eng)):
raise ValueError("Inconsistent number of questions.")
with open(p.join(_dir, 'interesting_ids.ast'), 'r', encoding='utf-8') as f:
interesting_ids = ast.literal_eval(f.read())
# patch ids
add_file = p.join(_dir, 'dbo_ids_add.ast')
remove_file = p.join(_dir, 'dbo_ids_remove.ast')
add_ids: List[int] = []
remove_ids: List[int] = []
if p.isfile(add_file):
with open(add_file, 'r', encoding='utf-8') as f:
add_ids = ast.literal_eval(f.read())
if p.isfile(remove_file):
with open(remove_file, 'r', encoding='utf-8') as f:
remove_ids = ast.literal_eval(f.read())
Δ = SOLTI_ENG_ID_Δ if (profile_name == SOLTI_ENG) else 0
remove_ids = [i + Δ for i in remove_ids if i not in add_ids]
ids = df_raw['id'].values
if remove_ids:
idxs = [idx for idx, id_ in enumerate(ids) if id_ in remove_ids]
df_raw = df_raw.drop(df_raw.index[idxs])
df_out = df_out.drop(df_out.index[idxs])
ids = df_raw['id'].values
elif profile_name == SOLTI_ENG:
raise ValueError
main_quest_n = quest_n - len(EXTRA_QUESTIONS)
profs = df_raw[[str(i) for i in range(1, main_quest_n + 1)]].values
max_same_quest = np.array([np.max(np.unique(row, return_counts=True)[1]) for row in profs])
del_mask = max_same_quest > np.mean(max_same_quest) + 4 * np.std(max_same_quest)
if len(ids[del_mask]) > 0:
del_ids = ids[del_mask] - Δ
raise ValueError(f'These IDs have too many same questions: (for {profile_name}): {list(del_ids)}'
+ f' (counts: {list(max_same_quest[del_mask])})')
corr = np.corrcoef(profs) - np.eye(len(profs))
if np.isnan(corr).any():
del_ids = ids[list(set(i for i, row in enumerate(corr) if all(np.isnan(row))))] - Δ
raise ValueError(f'These IDs give NaN correlations with other IDs (for {profile_name}): {list(del_ids)}')
mask = np.max(corr, axis=1) >= 0.99
if len(corr[mask]) > 0:
idxs = np.arange(0, len(profs))
has_equals = [int(i) for i in idxs[mask]]
del_idxs: List[int] = []
for i in has_equals.copy():
for j, c in enumerate(corr[i]):
if (c >= 0.99) and (i in has_equals):
del_idxs.append(j)
has_equals = [s for s in has_equals if s != j]
assert len(has_equals) == len(set(has_equals))
del_ids = ids[del_idxs] - Δ
raise ValueError(f'Duplicate profiles. Recommended to delete IDs (for {profile_name}): {list(del_ids)}')
return df_raw, df_out, questions, questions_eng, interesting_ids
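# Illustrative usage (this is how read_bolti_434/read_solti_160 below call it):
#   df_raw, df_out, questions, questions_eng, interesting_ids = read_profiles(SOLTI)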
def types_tal_good_mask(df_out: DataFrame, tal_profs: Array,
second_type_gap_pc_thr: int=70,
k_the_sigma_thr: float=-2,
k_halves_correl_thr: float=-2) -> Array:
"""
:param df_out: of shape (~6000, K)
:param tal_profs: of shape (~6000, 16)
:param second_type_gap_pc_thr: threshold for second type.
Default: the 2nd type should be <= 67% of the 1st type.
:param k_the_sigma_thr: threshold is mean(the_sigma) + k_the_sigma_thr * std(the_sigma)
:param k_halves_correl_thr: threshold is mean(halves_correl) + k_halves_correl_thr * std(halves_correl)
:return: bool mask of shape (~6000,) that is True when profile is "good".
"""
sort = np.sort(tal_profs, axis=-1)
sort[sort <= 0] = 0
sort = np.round(np.einsum('ij,i->ij', sort, np.max(tal_profs, axis=-1)**-1) * 100).astype(int)
second_type_gap_mask = (sort[:, -1] == 100) & (sort[:, -2] <= second_type_gap_pc_thr)
the_sigma = df_out['sigma_of_the_profile'].values
the_sigma_mask = the_sigma >= (np.mean(the_sigma) + k_the_sigma_thr * np.std(the_sigma))
halves_correl = df_out['correl_of_the_halves'].values
halves_correl_mask = halves_correl >= (np.mean(halves_correl) + k_halves_correl_thr * np.std(halves_correl))
return second_type_gap_mask & the_sigma_mask & halves_correl_mask
def _smart_coincide(
tal_profs: Array, types_self: Array, types_tal: Array, threshold: int=-80,
thresholds: Tuple[Tuple[int, Tuple[int, ...]], ...]=(), labels: str='type') -> Array:
"""
>>> TO_DICT
Old: threshold=90, thresholds_plus=((81, (4, 8, 16)),)
:param tal_profs: of shape (~6000, 16) of float
:param types_self: of shape (~6000,) from {-1, 1, ..., 16}
:param types_tal: of shape (~6000,) from {1, ..., 16}
:param threshold: default threshold for smart Talanov's types.
        Positive: the self type is accepted if it is among the Talanov types whose scale exceeds `threshold` percent of the maximum scale.
        Zero: self type should coincide with Talanov's.
        Negative: the self type must coincide with Talanov's top type and, in addition, no other type scale may exceed |threshold| percent of the maximum.
:param thresholds: custom thresholds per type like ((81, (4, 16)),) that would turn into {81: (4, 16)}
:param labels: for other values see keys of TO_DICT const
:return: of shape (~6000,) in {-16, ..., -1, 1, ..., 16} positive when smart coincided
"""
if len(tal_profs) != len(types_self):
raise ValueError('Inconsistent tal_profs and types_self length.')
tal_profs = np.round(np.einsum('ij,i->ij', tal_profs, np.max(tal_profs, axis=1)**-1) * 100).astype(int)
types_tal_one_hot = np.eye(16).astype(int)[types_tal - 1]
def trimmed_tal_profs(thr: int) -> Array:
if thr == 0:
return 100 * types_tal_one_hot
ret = np.copy(tal_profs)
ret[ret < abs(thr)] = 0
return ret
thr_types = dict(thresholds)
defined = [i for types in thr_types.values() for i in types]
assert (len(defined) == len(set(defined))) and (threshold not in thr_types)
thr_types[threshold] = tuple(i for i in range(1, 17) if i not in defined)
absthr_arr: Dict[int, Array] = {thr: trimmed_tal_profs(thr) for thr in set(abs(t) for t in thr_types) | {0}}
def get_thr(type_: int) -> int:
if type_ == -1:
return 0
for thr, types in thr_types.items():
if type_ in types:
return thr
raise AssertionError
type_thr: Dict[int, int] = {i: get_thr(i) for i in range(-1, 17) if i != 0}
tal_profs_ = np.array([absthr_arr[abs(type_thr[n])][i] for i, n in enumerate(types_self)])
if labels == 'type':
map_: Dict[int, int] = {i: i for i in range(1, 17)}
else:
map_ = TO_DICT[labels]
def kernel(bests: List[int], self: int, tal: int, thr_pos: bool) -> int:
assert tal >= 1
if self < 1:
return -tal
if thr_pos:
return self if (self in bests) else -tal
return self if (len(bests) == 1) and (bests[0] == self) else -tal
smart_coin = np.array([kernel(
bests=[map_[int(s)] for s in list(np.where(row > abs(type_thr[self]))[0] + 1)],
self=map_[int(self)] if (self >= 1) else -1,
tal=map_[int(tal)],
thr_pos=type_thr[self] > 0
) for row, self, tal in zip(tal_profs_, types_self, types_tal)])
return smart_coin
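# Concrete example of the sign convention: with threshold=-80, a self type of 5 counts as
# coincident only if Talanov's top type is also 5 and no other type scale exceeds 80% of the
# maximum; with threshold=+80 it is enough that type 5's scale itself exceeds 80% of the maximum.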
def smart_coincide_db(tal_profs: Array, types_self: Array, types_tal: Array, males: Array,
threshold: int=90,
labels: str='type',
thresholds_males: Tuple[Tuple[int, Tuple[int, ...]], ...]=(),
thresholds_females: Tuple[Tuple[int, Tuple[int, ...]], ...]=()) -> Array:
smart_coin_males = _smart_coincide(
tal_profs=tal_profs, types_self=types_self, types_tal=types_tal, labels=labels, threshold=threshold,
thresholds=thresholds_males
)
smart_coin_females = _smart_coincide(
tal_profs=tal_profs, types_self=types_self, types_tal=types_tal, labels=labels, threshold=threshold,
thresholds=thresholds_females
)
smart_coin = smart_coin_females
smart_coin[males] = smart_coin_males[males]
return smart_coin
def smart_coincide_solti_good(
tal_profs: Array, types_self: Array, types_tal: Array, males: Array,
threshold: int=90,
labels: str='type',
thresholds_males: Tuple[Tuple[int, Tuple[int, ...]], ...]=(
(81, (4,)),
),
thresholds_females: Tuple[Tuple[int, Tuple[int, ...]], ...]=()) -> Array:
return smart_coincide_db(tal_profs=tal_profs, types_self=types_self, types_tal=types_tal, males=males,
threshold=threshold, labels=labels,
thresholds_males=thresholds_males, thresholds_females=thresholds_females)
def smart_coincide_solti_better(
tal_profs: Array, types_self: Array, types_tal: Array, males: Array,
threshold: int=-80,
labels: str='type',
thresholds_males: Tuple[Tuple[int, Tuple[int, ...]], ...]=(
(-99, (16,)), (-90, (3, 4)), # old: (81, (3,)), (-97, (3, 16)),
),
thresholds_females: Tuple[Tuple[int, Tuple[int, ...]], ...]=(
(-90, (16,)), # old: (-95, (16,)),
)) -> Array:
return smart_coincide_db(tal_profs=tal_profs, types_self=types_self, types_tal=types_tal, males=males,
threshold=threshold, labels=labels,
thresholds_males=thresholds_males, thresholds_females=thresholds_females)
def smart_coincide_bolti(
tal_profs: Array, types_self: Array, types_tal: Array, males: Array,
threshold: int=90,
labels: str='type',
thresholds_males: Tuple[Tuple[int, Tuple[int, ...]], ...]=(
(81, (3, 4, 16)),
),
thresholds_females: Tuple[Tuple[int, Tuple[int, ...]], ...]=()) -> Array:
return smart_coincide_db(tal_profs=tal_profs, types_self=types_self, types_tal=types_tal, males=males,
threshold=threshold, labels=labels,
thresholds_males=thresholds_males, thresholds_females=thresholds_females)
def preprocess_profiles(df_raw: DataFrame,
df_out: DataFrame,
questions: List[str],
questions_eng: List[str],
select_columns: List[str],
interesting_indexes: Array,
db_name: str=None) -> DB:
tal_profs = df_out.loc[:, [str(i) for i in range(1, 17)]].values
types_self = df_raw['self'].values
types_tal = df_raw['diagnosis'].values
profiles = df_raw.loc[:, select_columns].values
sex = profiles[:, 0]
if tuple(np.unique(types_tal)) != tuple(range(1, 17)):
raise ValueError
if tuple(np.unique(types_self)) != ((-1,) + tuple(range(1, 17))):
raise ValueError
if tuple(np.unique(sex)) != (1, 5):
raise ValueError
def _types_self_extra() -> Array:
good = types_tal_good_mask(df_out=df_out, tal_profs=tal_profs)
good_no_self = good & (types_self == -1) # & (sex == 5)
types_ = types_tal == MISSING_TYPES[0]
for type_ in MISSING_TYPES[1:]:
types_ = types_ | (types_tal == type_)
good_no_self_types = good_no_self & types_
types_self_extra_ = np.copy(types_self)
types_self_extra_[good_no_self_types] = types_tal[good_no_self_types]
if db_name == 'bolti':
special_type = 4
good_special = types_tal_good_mask(
df_out=df_out, tal_profs=tal_profs,
second_type_gap_pc_thr=90, k_the_sigma_thr=-2, k_halves_correl_thr=-2)
good_no_self_special = good_special & (types_self == -1) & (types_tal == special_type)
# print(len(types_self[good_no_self_special]))
# raise
types_self_extra_[good_no_self_special] = types_tal[good_no_self_special]
return types_self_extra_
types_self_extra = _types_self_extra()
types_tal_ = np.copy(types_tal)
males = profiles[:, 0] == 5
if db_name == 'bolti':
smart_coincide = smart_coincide_bolti
elif BETTER:
smart_coincide = smart_coincide_solti_better
else:
smart_coincide = smart_coincide_solti_good
def smart_coincide_(labels: str) -> Array:
return smart_coincide(tal_profs=tal_profs, types_self=types_self_extra, types_tal=types_tal_, males=males,
labels=labels)
type_smart_coin = smart_coincide_('type')
type_smart_mask = type_smart_coin > 0
types_tal[type_smart_mask] = type_smart_coin[type_smart_mask]
# print(df, '\n-----------------------\n', profiles, '\n\n', profiles.shape)
if len({len(profiles), len(df_raw), len(types_self), len(types_tal)}) != 1:
raise ValueError('Data has inconsistent dimensions.')
types_tal_sex = np.copy(types_tal)
types_tal_sex[males] += 16
return DB(profiles=profiles, df=df_raw, types_tal=types_tal, types_self=types_self, types_tal_sex=types_tal_sex,
questions=list(zip(questions, questions_eng)), interesting=interesting_indexes,
type_smart_coincide=type_smart_coin,
other_smart_coincide={tp: smart_coincide_(tp) for tp in TO_DICT},
other_y_dim={tp: len(set(TO_DICT[tp].values())) for tp in TO_DICT})
def ids_to_idx(interesting_ids: List[List[int]], ids: Array) -> Array:
inters_idxs = [np.argmin(np.abs(ids - id_)) for id_ in [s[-1] for s in interesting_ids]]
# inters_idxs = [idx for idx, id_ in enumerate(ids) if id_ in [s[-1] for s in interesting_ids]]
if len(interesting_ids) != len(inters_idxs):
raise AssertionError(interesting_ids, inters_idxs)
interesting_indexes = np.array([[lst[0], idx] for lst, idx in zip(interesting_ids, inters_idxs)])
return interesting_indexes
def read_bolti_434() -> DB:
_df_raw, _df_out, questions, questions_eng, interesting_ids = read_profiles(BOLTI)
mask = _df_raw['goal'] != 3
df_raw: DataFrame = _df_raw.loc[mask]
df_out: DataFrame = _df_out.loc[mask]
if len(_df_raw) == len(df_raw):
raise ValueError('Selecting (goal != 3) failed.')
interesting_indexes = ids_to_idx(interesting_ids, df_raw['id'].values)
columns = EXTRA_COLUMNS_IDS + [str(i) for i in range(1, 430 + 1)]
return preprocess_profiles(df_raw=df_raw, df_out=df_out, questions=questions, questions_eng=questions_eng,
select_columns=columns, interesting_indexes=interesting_indexes, db_name='bolti')
def read_solti_160() -> DB:
df_ru, df_out_ru, questions, questions_eng, interesting_ids_ru = read_profiles(SOLTI)
df_en, df_out_en, _, _, interesting_ids_en = read_profiles(SOLTI_ENG)
out_columns = ['sigma_of_the_profile', 'correl_of_the_halves'] + [str(i) for i in range(1, 17)]
df_raw: DataFrame = pd.concat([df_ru, df_en], ignore_index=True)
    df_out: DataFrame = pd.concat([df_out_ru[out_columns], df_out_en[out_columns]], ignore_index=True)
import os
import pandas as pd
from pandas.io.json import json_normalize
import streamlit as st
from typing import List
import streamlit.components.v1 as components
from awesome_table.column import (ColumnDType, Column)
_RELEASE = True
class AwesomeTable():
"""AwesomeTable is a component for Streamlit to build a table based in bootstrap with search and order funcionality."""
if _RELEASE:
_root_dir = os.path.dirname(os.path.abspath(__file__))
_build_dir = os.path.join(_root_dir, 'frontend/build')
_awesome_table_ = components.declare_component(
"awesome_table",
path=_build_dir
)
else:
_awesome_table_ = components.declare_component(
"awesome_table",
url='http://localhost:3001'
)
def __init__(self, data: pd.DataFrame, columns: List =[], show_order = False, show_search= False, show_search_order_in_sidebar = False, key = 'awesome_table'):
"""AwesomeTable is a component for Streamlit to build a table based in bootstrap with search and order funcionality.
Can build this table based in a pandas dataframe. The order and search components would be displayed on the sidebar or above the table.
Args:
data (pd.Dataframe): Dataframe to build the table. If you've a JSON data, you can use the `pd.json_normalize(json)` method.
columns (List, optional): Columns that will be displayed in table. You can pass parameters to personalize each. Defaults to [].
show_order (bool, optional): Show order components. Defaults to False.
show_search (bool, optional): Show search components. Defaults to False.
show_search_order_in_sidebar (bool, optional): [description]. Defaults to False.
key (str, optional): Key for identification table. Defaults to 'awesome_table'.
"""
self.data = self.set_data(data, columns)
self.columns = self.set_columns(columns)
self.key = key
self.show_order = show_order
self.show_search = show_search
self.show_search_order_in_sidebar = show_search_order_in_sidebar
self.build_table_content()
self.build_order_component()
AwesomeTable._awesome_table_(data=self.table_content, columns=[column.to_json() for column in self.columns], key=self.key)
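    # Illustrative usage inside a Streamlit app (the column names below are hypothetical):
    #   AwesomeTable(pd.json_normalize(sample_json),
    #                columns=[Column('name'), Column('created_at', dtype=ColumnDType.DATETIME)],
    #                show_search=True, show_order=True, key='awesome_table')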
def set_data(self, data, columns) -> pd.DataFrame:
"""Set dataframe based in columns passed by parameter.
Args:
data (pd.DataFrame): Dataframe pandas.
columns (List[Column]): List of the columns.
Returns:
pd.Dataframe: Pandas Dataframe based in columns passed by parameter.
"""
if columns is not None and len(columns) > 0:
if type(columns[0]) is str:
data = data[[column for column in columns]]
else:
data = data[[column.name for column in columns]]
for col in [column.name for column in columns if column.dtype == ColumnDType.DATETIME]:
data[col] = pd.to_datetime(data[col])
return data
def set_columns(self, columns):
"""Set columns based in parameters passed by parameter.
Args:
columns (_type_): _description_
Returns:
_type_: _description_
"""
if columns is None or len(columns) == 0:
self.columns = None
return self.get_columns()
if columns is not None and len(columns) > 0 and type(columns[0]) is str:
return [Column(column) for column in columns]
return columns
def get_columns(self):
"""If columns not passed by parameter, return all columns based in pandas Dataframe columns.
Returns:
List[Column]: List of columns.
"""
if self.columns is None or len(self.columns) == 0:
self.columns = list()
for col in self.data.columns:
self.columns.append(Column(col, dtype=ColumnDType.STRING))
return self.columns
def get_column_label_by_name(self, name):
"""Return the label of the column based in the name passed by parameter.
Args:
name (str): Name of the column.
Returns:
str: Return label if exists, else return name.
"""
for column in self.get_columns():
if column.name == name:
return column.get_label()
return None
def get_column_name(self):
"""Return all columns names.
Returns:
List[str]: Columns name
"""
return [column.name for column in self.get_columns()]
def build_table_content(self):
"""Create json to populate table from pandas Dataframe.
"""
data = self.data.copy()
for col in [column for column in self.columns if column.dtype == ColumnDType.DATETIME]:
            data[col.name] = pd.to_datetime(data[col.name])
"""
Pulsar search analysis
"""
import os, glob
import numpy as np
import pylab as plt
import matplotlib.ticker as ticker
import pandas as pd
from astropy.io import fits
from skymaps import SkyDir, Band
from . import (sourceinfo, associations, _html, fermi_catalog)
from .. import tools
from analysis_base import html_table, FloatFormat
from astropy.table import Table
def bigfile( path='$FERMI/catalog/srcid/cat/Pulsars_BigFile_*.fits'):
""""manage look up in the BigFile"""
ff = sorted(glob.glob(os.path.expandvars(path)))
filename = ff[-1]
version = filename.split('_')[-1][:-5]
t= fits.open(filename)
df = pd.DataFrame(t[1].data)
names=[t.strip() for t in df.NAME.values]
jnames=[t.strip() for t in df.PSRJ.values]
psrnames = map(lambda s:'PSR '+s, jnames)
df.index = psrnames
return df
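# Illustrative lookup: the returned DataFrame is indexed by 'PSR <Jname>' strings, so e.g.
# bigfile().loc['PSR J0030+0451'] gives that pulsar's BigFile row (if the pulsar is present).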
class Pulsars(sourceinfo.SourceInfo):
"""Pulsar plots and analysis
"""
def setup(self, **kw):
super(Pulsars, self).setup(**kw)
self.plotfolder='pulsars'
self.psr = np.asarray([s.startswith('PSR') for s in self.df.index],bool)
plt.rc('font', size=14)
# get the LAT pulsar table as a DataFrame, with index as the name
self.lcat=lcat = glob.glob(os.path.expandvars('$FERMI/catalog/srcid/cat/obj-pulsar-lat_v1*'))[-1]
filename= os.path.split(lcat)[-1]
self.version = filename.split('.')[0][-4:]
print ('Loading LAT pulsar catalog {}'.format(filename))
self.lcatdf = df =Table.read(lcat, hdu=1).to_pandas()
self.lcatdf['msec']=msec = np.array([code.find('m')>-1 for code in df.PSR_Code ], bool)
print ('Found {} entries, {} millisecond pulsars'.format(len(df), sum(msec)))
self.latpsr_info = 'From file {}: {} entries, {} millisecond pulsars'.format(filename,len(df), sum(msec))
df.index= map(lambda name: name.strip(), df.Source_Name.values)
# msec designation to corresponding entries in full source list
self.df['msec'] = self.lcatdf.msec
def load_assoc(df):
# add association info to the data frame
associations = df.associations
probfun = lambda x: x['prob'][0] if not pd.isnull(x) else 0
df['aprob'] = np.array([ probfun(assoc) for assoc in associations])
df['acat'] = np.array([ assoc['cat'][0] if not pd.isnull(assoc) else 'unid' for assoc in associations])
df['aname'] = np.array([ assoc['name'][0] if not pd.isnull(assoc) else 'unid' for assoc in associations])
df['aang'] = np.array([ assoc['ang'][0] if not pd.isnull(assoc) else np.nan for assoc in associations])
df['adeltats'] = np.array([assoc['deltats'][0] if not pd.isnull(assoc) else np.nan for assoc in associations])
load_assoc(self.df)
def check4FGL(self, pattern=None):
# add 4FGL info to dataframe of pointlike soruies
df=self.df
cindex = [n.replace(' ','') for n in self.df.index]
systematic = self.config['localization_systematics']
f95, quad = 2.45*systematic[0], systematic[1]/60.
self.df['r95'] = (f95**2*(self.df.a * self.df.b) + quad**2)** 0.5
# get the catalog "gll" entries as a DataFrame and set corresponding values
if pattern is None:
pattern=self.config['gllcat']
if not pattern.startswith('/'):
pattern = '$FERMI/catalog/'+pattern
filename = sorted(glob.glob(os.path.expandvars(pattern)))[-1]
fcat = fermi_catalog.GLL_PSC2(filename)
self.fhl_file = fcat.filename.split('/')[-1]
self.gdf = gdf= fcat.df
gdf['uw_ts'] = self.df.ts
gdf['uw_r95'] = self.df.r95
gdf['uw_pindex']= self.df.pindex
gdf['uw_eflux100']=self.df.eflux100
# add boolean for in FL8Y
self.df['fl8y'] = np.isin(cindex, gdf.index )
print ('{} of {} have nicknames in pointlike list'.format(sum(df.fl8y), len(gdf)))
# for sources not already tagged via the pointlike name being the same as the gtlike nickname
# look for nearest 4FGL source: add name, its distance to DataFrame
ok = df.fl8y==True
added = np.logical_not(ok)
df.loc[df.index[ok],'otherid']= df[ok].name
df.loc[df.index[ok], 'distance']=0
# look for nearest 4FGL source in rejected list: add name, distance to DataFrame
print ('Searching 4FGL for nearest source to the {} not found in it...'.format(sum(added)),)
close = tools.find_close(df[added], self.gdf)
df.loc[df.index[~ok],'otherid'] = close.otherid
df.loc[df.index[~ok], 'distance'] = close.distance
df['b4fgl'] = df.distance<0.015
df['otherts'] = [self.gdf.loc[s.otherid.replace(' ','')].ts for name,s in df.iterrows() ]
df['other_extended'] = [self.gdf.loc[s.otherid.replace(' ','')].extended for name,s in df.iterrows() ]
print ('done.')
def LATpulsars(self):
""" LAT pulsar information
%(latpsr_info)s
"""
df = self.lcatdf
msec = np.array(df.msec, bool)
def fig1(ax):
ax.loglog(-df.F1[msec],df.F0[msec], 'o', label='Millisecond');
ax.loglog(-df.F1[~msec],df.F0[~msec], 'o', label='Young');
ax.set(xlabel='Frequency derivative [Hz/s]', ylabel='Frequency [Hz]')
ax.grid(alpha=0.5)
ax.legend()
def fig2(ax):
sd = map(SkyDir, df.RAJ2000, df.DEJ2000)
sinb = np.sin(np.radians(map(lambda s:s.b(), sd)))
hkw= dict(bins=np.linspace(-1,1,21), histtype='step', lw=2 )
ax.hist(sinb[msec], label='msec', **hkw)
ax.hist(sinb[~msec], label='young', **hkw)
ax.set(xlabel='sin(b)')
ax.legend(); ax.grid(alpha=0.5)
fig, axx = plt.subplots(1,2, figsize=(12,6))
map( lambda f,ax: f(ax), [fig1,fig2], axx.flatten() )
fig.suptitle('LAT pulsars v{}'.format(self.version))
return fig
def spectra(self, index_min=0.0, index_max=2.5, cutoff_max=1e4, taillist=True):
""" Spectral distributions
Spectral parameters for %(spectral_fits)d pulsars with significant fits (TS>16)
%(pulsar_tail_check)s
"""
psrmodel = (self.df.ts>16) & (self.df.modelname=='PLSuperExpCutoff') & self.df.psr
self.spectral_fits = sum(psrmodel)
t = self.df.loc[psrmodel]\
['ts flux pindex cutoff e0 index2 index2_unc roiname freebits fitqual msec'.split()]
t['eflux'] = t.flux * t.e0**2 * 1e6
msec = np.array(t.msec.values,bool)
def histit(ax, bins, vals):
hkw = dict(histtype='stepfilled', alpha=0.5, lw=2)
ax.hist(vals[msec], bins, label='msec', color='lightblue',edgecolor='blue', **hkw )
ax.hist(vals[~msec], bins, label='young',color='pink', edgecolor='red', **hkw)
def plot1(ax, efmin=1e-2,efmax=1e3):
bins = np.logspace(np.log10(efmin),np.log10(efmax),26)
vals = np.array(t.eflux,float).clip(efmin,efmax)
histit(ax, bins, vals)
ax.set(xscale='log', xlabel='energy flux', xlim=(efmin,efmax)); ax.grid(alpha=0.5);
ax.legend(prop=dict(size=10))
def plot3(ax):
bins = np.linspace(index_min,index_max,16)
vals = np.array(t.pindex,float).clip(index_min,index_max)
histit(ax, bins, vals)
ax.set( xlabel='spectral index'); ax.grid(alpha=0.5);
ax.legend(prop=dict(size=10))
def plot2(ax):
bins = np.logspace(2,4,26)
vals = np.array(t.cutoff,float).clip(None,cutoff_max)
histit(ax,bins, vals)
ax.set(xscale='log', xlabel='cutoff energy (GeV)'); ax.grid(alpha=0.5)
ax.legend(prop=dict(size=10))
ax.xaxis.set_major_formatter(ticker.FuncFormatter(
lambda val,pos: { 100:'0.1', 1000:'1', 10000:'10'}.get(val,'')))
def plot4(ax):
xvals = np.array(t.cutoff,float).clip(None, cutoff_max)
yvals = np.array(t.pindex,float).clip(index_min,index_max)
ax.plot(xvals[msec], yvals[msec], 'o', color='blue', label='msec')
ax.plot(xvals[~msec], yvals[~msec], 'D', color='orange', label='young')
ax.set(xscale='log', xlabel='cutoff [GeV]', ylabel='spectral index',
ylim=(index_min-0.1, index_max+0.1),
)
ax.grid(alpha=0.5);
ax.legend(loc='lower right', prop=dict(size=10))
ax.xaxis.set_major_formatter(ticker.FuncFormatter(
lambda val,pos: { 100:'0.1', 1000:'1', 10000:'10'}.get(val,'')))
fig, axx = plt.subplots( 2,2, figsize=(12,12))
plt.subplots_adjust(wspace=0.3, left=0.05,bottom=0.15)
map(lambda f,ax:f(ax),(plot1,plot2,plot3,plot4,), axx.flatten())
tail_cut = (t.pindex<=index_min) | (t.pindex>index_max) | (t.cutoff>cutoff_max)
tails = t.loc[tail_cut].index
print ('%d pulsar sources found in tails of index or cutoff' % sum(tail_cut))
if taillist & (sum(tail_cut)>0) :
tails=t[tail_cut]['ts eflux pindex cutoff freebits roiname'.split()]
filename = 'pulsar_tails.html'
html_file = self.plotfolder+'/%s' % filename
#html = tails.sort_values(by='roiname').to_html(float_format=FloatFormat(2))
html = html_table(tails.sort_values(by='roiname'), float_format=FloatFormat(2))
open(html_file,'w').write('<head>\n'+ _html.style + '</head>\n<body>'+ html+'\n</body>')
self.pulsar_tail_check = '<p><a href="%s?skipDecoration">Table of %d sources on tails</a>: '% (filename, len(tails))
self.pulsar_tail_check += 'Criteria: require index between 0 and 2.5, cutoff < {:.1f} GeV'.format(cutoff_max*1e-3)
else:
self.pulsar_tail_check ='<p>No sources on tails'
return fig
def pulsar_check(self):
"""LAT pulsar check
%(atable)s
"""
# compare with LAT pulsar catalog
lat=self.lcatdf
lat['ts'] = self.df[self.df.psr]['ts']
lat['aprob'] = self.df[self.df.psr]['aprob']
lat['ROI_index'] = [Band(12).index(SkyDir(float(ra),float(dec))) for ra,dec in zip(lat.RAJ2000,lat.DEJ2000)]
lat['skydir'] = [SkyDir(float(ra),float(dec)) for ra,dec in zip(lat.RAJ2000, lat.DEJ2000)]
lat['sourcedir'] = self.df.skydir[self.df.psr]
lat['delta'] = [np.degrees(s.difference(t)) if not type(t)==float else np.nan for s,t in zip(lat.skydir,lat.sourcedir)]
far = lat.delta>0.25
dc2names =set(self.lcatdf.index)
tt = set(self.df.name[self.df.psr])
print ('Catalog entries not found:', list(dc2names.difference(tt)))
missing = np.array([ np.isnan(x) or x<10. for x in lat.ts])
missing |= np.array((lat.aprob==0) & (lat.ts<1000) )
missing_names = lat.index[missing]
cols = 'RAJ2000 DEJ2000 ts delta ROI_index'.split()
self.latsel=latsel = pd.DataFrame( np.array([lat[id][missing] for id in cols]), index=cols, columns=missing_names).T
self.atable = '<h4>Compare with LAT pulsar catalog: {}</h4>'.format( self.version)
label_info= dict(ts='TS,Test Statistic', delta='delta,distance to fit position (deg)',
ROI_index='ROI Index,Index of the ROI, a HEALPix ring index')
self.atable += html_table(latsel.query('ts<10'), label_info,
heading = '<p>LAT catalog entries with weak or no fit (TS<10)',
name=self.plotfolder+'/weak', maxlines=20,
float_format=(FloatFormat(2)))
self.atable += html_table(latsel.query('ts>10'), label_info,
heading = '<p>LAT catalog entries with nearby, but unassociated source ',
name=self.plotfolder+'/far', maxlines=20,
float_format=(FloatFormat(2)))
def bigfile_associations(self, test=False):
"""BigFile Associations
        Construct a list of point sources, excluding the LAT pulsars themselves, associated with the BigFile pulsar list (Version %(bigfile_version)s).
<br>Exclude sources with poor localization (quality>5) and BigFile pulsars in clusters.
<ul>
<li>%(bigfile_hi_table)s </li>
<li>%(bigfile_lo_table)s </li>
</ul>
"""
class BigFile(object):
""""manage look up in the BigFile"""
def __init__(self):
ff = sorted(glob.glob(os.path.expandvars('$FERMI/catalog/srcid/cat/Pulsars_BigFile_*.fits')))
t= fits.open(ff[-1])
print ('Read file {}'.format(ff[-1]))
self.version = ff[-1].split('_')[-1].split('.')[0]
self.d = pd.DataFrame(t[1].data)
self.names=[t.strip() for t in self.d.NAME.values]
self.jnames=[t.strip() for t in self.d.PSRJ.values]
def __call__(self, name):
"""Find the entry with given name"""
if name in self.names: i = self.names.index(name)
elif name in self.jnames: i= self.jnames.index(name)
else:
error = 'Data for source %s not found' %name
print (error)
raise ValueError(error)
return self.d.iloc[i]
not_psr = np.array([not n.startswith('PSR') for n in self.df.index],bool)
psrx = np.array([x=='pulsar_big' for x in self.df.acat], bool) & not_psr & (self.df.locqual<5)
print ('%d sources associated with BigFile pulsar list' % sum(psrx))
pt = self.df[psrx]['aprob aname aang ts glat glon pivot_energy curvature locqual'.split()]
# look it up in BigFile, add other stuff
self.bf = bf=BigFile()
self.bigfile_version = bf.version
anames = self.df[psrx].aname
pt['jname'] = jname = [bf(n).PSRJ for n in anames]
        # jnames ending in a digit are field pulsars; cluster members end in a letter, so this keeps sources not in clusters
not_incluster = [n[-1] in '0123456789' for n in pt.jname]
print (' Selected {} out of {} associations not in clusters'.format(sum(not_incluster), len(pt)))
def hmax(n):
t = bf(n)
return max(t.Hall32, t.Hall36, t.Hall40, t.Hval32, t.Hval36, t.Hval40)
pt['Hmax'] = [hmax(n) for n in anames]
pt['history']= [bf(n).History[1:-1].replace("'","") for n in anames]
pt['edot'] = ['%.2e'%bf(n).EDOT for n in anames]
pt['P0'] = ['{:.3f}'.format(bf(n).P0) for n in anames]
# make file table
ptx = pt[not_incluster]['jname glat glon edot P0 history Hmax ts aprob aang curvature pivot_energy locqual'.split()]
hilat = abs(pt.glat)>5
if len(ptx)>0:
colinfo=dict(name='Source Name,click for link to SED',
jname='Pulsar name,J version',
edot='Edot, rate of energy change',
Hmax='Hmax, max(Hall32, Hall36, Hall40, Hval32, Hval36, Hval40)',
pivot_energy='Pivot Energy,Energy of zero correlation between spectral index and normalization ',
history='History,BigFile history entry',
ts='TS,Test Statistic for the source',
aprob='Probability,Bayesian association probability',
aang='Angle,angular distance (deg)',
#curvature='curvature,?',
locqual='Localization quality,measure of the goodness of the localization fit\n greater than 5 is questionable',
)
self.bigfile_hi_table= \
html_table(ptx[hilat], colinfo, float_format=FloatFormat(2),
heading = """<b>Table of %d high-latitude (|b|>5) associations.</b>""" % sum(hilat),
name=self.plotfolder+'/hilat_table',
maxlines=10)
self.bigfile_lo_table= \
html_table(ptx[~hilat], colinfo, float_format=FloatFormat(2),
heading = """<b>Table of %d low-latitude (|b|<5) associations.</b> """ % sum(~hilat),
name=self.plotfolder+'/lolat_table',
maxlines=10)
else:
self.bigfile_hi_table= self.bigfile_lo_table=''
return ptx if test else None
def curvature(self, setup=False, cmax=1.0):
"""Curvature
Distribution of the curvature per source, equivalent to the beta parameter for a LogParabola spectral model.
"""
if setup:
#expect to be called initially
self.df['curvature']= np.array([model.curvature() for model in self.df.model])
return
assert 'curvature' in self.df, 'Curvature not calculated'
df = self.df
psr = np.asarray([n.startswith('PSR') for n in df.index], bool)
fig,ax = plt.subplots(figsize=(8,6))
hkw = dict(bins=np.linspace(0,cmax,41), log=True, histtype='step', lw=2)
ax.hist(df.curvature.clip(0,cmax), label='all sources', **hkw)
ax.hist(df[df.psr].curvature.clip(0,cmax), label='EC model', **hkw)
ax.hist(df[psr].curvature.clip(0,cmax), label='PSR souce', **hkw)
plt.setp(ax, xlabel='Curvature', ylim=(0.5,None))
ax.legend()
ax.grid()
return fig
def new_candidates(self):
"""Potential pulsar candidates
Make a list of sources with the selections
<ul>
<li>not associated
        <li>not in 4FGL nor within 0.5 deg of a 4FGL source
        <li>nearest 4FGL source is extended or has TS>1000
        </ul>
        The plots are of this list, showing the effect of the curvature selection.
<h4>%(candidate_table)s</h4>
<br>A csv file of the above is <a href="../../%(pulsar_candidate_filename)s?download=true">here</a>
"""
# add info about 4FGL
self.check4FGL(pattern=None)
df=self.df
# select subset not in 4FGL and not associated and not close to a 4FGL source and that the closest is very strong
dfx = df.query('fl8y==False & aprob<0.8 & locqual<8 & distance>0.5 & other_extended==False & otherts<1000')
# values to display
ts = dfx.ts.astype(float).clip(0,1000)
singlat = np.sin(np.radians(dfx.glat.astype(float)))
curvature= dfx.curvature.astype(float).clip(0,1)
#curvature selection
cut = np.logical_and(curvature<0.75, curvature>0.15)
label_info = dict()
dfcut = dfx[cut]['ra dec ts glat pindex curvature locqual distance otherid otherts'.split()].sort_values(by='ts', ascending=False)
self.candidate_table = html_table(dfcut, label_info,
heading = '<b>Table of {} pointlike sources not in 4FGL, not assocated and with curvature selection</b>'.format(len(dfcut)),
name=self.plotfolder+'/candidates', maxlines=20,
float_format=(FloatFormat(2)))
self.pulsar_candidate_filename=self.plotfolder+'/pulsar_candidates.csv'
dfcut.to_csv(self.pulsar_candidate_filename)
self.df_pulsar_candidates = dfcut #for interactive
fig, (ax1,ax2, ax3) = plt.subplots(1,3, figsize=(12,5))
hkw = dict(histtype='step', lw=2)
def doit(ax, x, bins, xlabel, xlog=False):
ax.hist(x, bins, **hkw)
ax.hist(x[cut], bins, label='curvature cut', **hkw)
ax.set(xlabel=xlabel, xscale='log' if xlog else 'linear')
doit(ax2, ts, np.logspace(1,3,51), 'TS', xlog=True)
doit(ax3, singlat, np.linspace(-1,1,41), 'sin(b)')
doit(ax1, curvature, np.linspace(0,1,21), 'curvature')
return fig
def all_plots(self):
self.runfigures([
self.LATpulsars,
self.spectra,
self.pulsar_check,
self.bigfile_associations,
self.new_candidates,
])
#=================================================================================================
# Old stuff, may be useful
def efratio(self, e1=2000, e2=20000):
"""Examine energy flux ratio
Ratio of the energy flux at 20 GeV to that at 2 GeV.
The identified pulsar subset is shown.
"""
df=self.df
efr = df['eflux_ratio']=np.asarray([model(e2)/model(e1)*(e2/e1)**2 for model in self.df.model])
fig,ax = plt.subplots(figsize=(5,5))
xlim = (1e-2,10)
dom = np.logspace( np.log10(xlim[0]),np.log10(xlim[1]) ,31)
ax.hist(efr.clip(*xlim), dom ,log=True);
ax.hist(efr[self.psr].clip(*xlim), dom, log=True, color='orange', label='PSR');
plt.setp(ax, xscale='log', xlabel='eflux(20 GeV)/eflux(2 GeV)')
ax.legend()
ax.grid();
return fig
def selection(self, curvature_cut=0.1, ts_cut=10):
"""Select candidates.
%(selection_info)s
"""
self.curvature_cut=curvature_cut
self.ts_cut=ts_cut
df=self.df
probfun = lambda x: x['prob'][0] if not pd.isnull(x) else 0
aprob = np.array([ probfun(assoc) for assoc in self.df.associations])
no3fgl = np.asarray([s is None for s in self.df.cat3fgl]);
self.keep= keep = no3fgl &(~self.psr) \
& (self.df.curvature>curvature_cut) & (self.df.ts>ts_cut) & (self.df.locqual<8) &(aprob<0.1)
self.total=sum(keep)
self.cvsname='pulsar_candidates.csv'
t = self.df[keep]['ra dec glat ts pivot_energy pindex eflux_ratio curvature roiname'.split()]
t.to_csv(self.cvsname)
print ('wrote %d sources to %s' % (len(t), self.cvsname))
self.selection_info="""\
Cuts: non-3FGL, non-LAT PSR, association probability < 0.1, curvature>%(curvature_cut)s, TS>%(ts_cut)s<br>,
<br>Total:%(total)s
<br>%(html_list)s
<br>
Link to csv format table:
<a href="../../%(cvsname)s?download=true">%(cvsname)s</a></li>
"""
self.html_list = html_table(t, name=self.plotfolder+'/candidates',
heading='<h4>%d Candidate pulsar sources</h4>' % len(t),
float_format=FloatFormat(2))
def no_curvature(self, prefix='S966', ts_high_cut=2):
"""Weak new sources with PW fits
%(weak_list)s
"""
df = self.df
pcut = np.array([n.startswith(prefix) for n in df.index],bool);
cut = (df.ts>10) & (df.locqual<8) & (df.curvature<0.01) & pcut & (df.ts_high<ts_high_cut) & (df.ts_low<5)
t = self.df[cut]['ra dec glat ts pivot_energy pindex fitqual locqual ts_low ts_med ts_high roiname'.split()]
        self.noc_df = t.sort_values(by='roiname')
print ('selected %d %s sources' % (len(t), prefix))
self.weak_list = html_table(t, name=self.plotfolder+'/weak_pl_sources',
heading='<h4>%d weak new power-law sources</h4>' % len(t),
float_format=FloatFormat(2))
def load_list(self, filename):
df = self.df
print ('Reading the LOFAR list "{}"'.format(filename))
        self.lofar = lofar = pd.read_csv(filename, index_col=0)
"""
This script contains all necessary code to extract and convert the patient data from the Sciensano hospital survey into parameters usable by the BIOMATH COVID-19 SEIRD model.
You must place the super secret detailed hospitalization dataset `COVID19BE_CLINIC.csv` in the same folder as this script in order to run it.
Further, you must MANUALLY replace décédé and rétabli in the file `COVID19BE_CLINIC.csv` with D and R.
To load the resulting .xlsx into a pandas dataframe use:
dataframe = pd.read_excel('../../data/interim/model_parameters/COVID19_SEIRD/sciensano_hospital_parameters.xlsx', sheet_name='residence_times', index_col=0, header=[0,1])
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import os
import math
import numpy as np
import pandas as pd
from scipy.stats import mannwhitneyu, ttest_ind, gamma, exponweib, weibull_min
import matplotlib.pyplot as plt
import datetime
from datetime import timedelta
import argparse
# ----------------
# Script arguments
# ----------------
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--subset_size", help="Size of subset drawn from total population during bootstrapping", default=1000, type=int)
parser.add_argument("-n", "--number_iterations", help="Total number of bootstraps", default=100, type=int)
parser.add_argument("-a", "--age_stratification_size", help="Total number of age groups", default=9, type=int)
# Parse arguments
args = parser.parse_args()
# Set correct age_classes
if args.age_stratification_size == 3:
age_classes = pd.IntervalIndex.from_tuples([(0,20),(20,60),(60,120)], closed='left')
age_path = '0_20_60/'
elif args.age_stratification_size == 9:
age_classes = pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left')
age_path = '0_10_20_30_40_50_60_70_80/'
elif args.age_stratification_size == 10:
age_classes =pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')
age_path = '0_12_18_25_35_45_55_65_75_85/'
else:
raise ValueError(
"age_stratification_size '{0}' is not legitimate. Valid options are 3, 9 or 10".format(args.age_stratification_size)
)
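# age_classes is a left-closed IntervalIndex; it is used further down as the bins of
# pd.cut(df.age, ...) to assign each patient to one of the age groups defined above.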
# -----
# Paths
# -----
fig_path = '../../results/analysis/hospital/'+age_path
data_path = '../../data/interim/model_parameters/COVID19_SEIQRD/hospitals/' + age_path
# Verify that the paths exist and if not, generate them
for directory in [fig_path, data_path]:
if not os.path.exists(directory):
os.makedirs(directory)
# -----------------------------
# Helper functions and settings
# -----------------------------
plot_fit=False
colorscale_okabe_ito = {"orange" : "#E69F00", "light_blue" : "#56B4E9",
"green" : "#009E73", "yellow" : "#F0E442",
"blue" : "#0072B2", "red" : "#D55E00",
"pink" : "#CC79A7", "black" : "#000000"}
def adjacent_values(vals, q1, q3):
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
def set_axis_style(ax, labels):
ax.xaxis.set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.set_xlabel('Sample name')
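# fit_weibull below fits a Weibull distribution per age group; floc=0 fixes the location
# parameter at zero, so effectively a two-parameter (shape, scale) Weibull is fitted to the
# residence times of each age group.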
def fit_weibull(v):
sample_size_lst=[]
shape_lst=[]
loc_lst=[]
scale_lst=[]
for age_group in v.index.get_level_values(0).unique().values:
if isinstance(v[age_group],list):
values = [x for x in v[age_group] if (math.isnan(x) == False)]
shape, loc, scale = weibull_min.fit(values,floc=0)
sample_size_lst.append(len(v[age_group]))
else:
v[age_group][v[age_group]==0] = 0.01
v = v.dropna()
shape, loc, scale = weibull_min.fit(v[age_group].values,floc=0)
sample_size_lst.append(len(v[age_group].values))
shape_lst.append(shape)
loc_lst.append(loc)
scale_lst.append(scale)
return sample_size_lst, shape_lst, loc_lst, scale_lst
def plot_weibull_fit(v,par,max_val):
fig,axes = plt.subplots(nrows=3,ncols=3,sharex=True,figsize=(12,12))
axes = axes.flatten()
for idx,age_group in enumerate(v.index.get_level_values(0).unique().values):
bins = np.linspace(0, max_val, 10)
axes[idx].hist(v[age_group], bins=bins, density=True)
x = np.linspace (0.5, max_val, 1000)
#y = gamma.pdf(x, a=residence_times[par,'shape'][age_group], loc=residence_times[par,'loc'][age_group], scale=residence_times[par,'scale'][age_group])
y = weibull_min.pdf(x, c=residence_times[par,'shape'][age_group], loc=residence_times[par,'loc'][age_group], scale=residence_times[par,'scale'][age_group])
axes[idx].plot(x,y)
axes[idx].text(x=0.70,y=0.82,s='Shape: '+"{:.2f}".format(residence_times[par,'shape'][age_group]) + '\nScale: ' + "{:.2f}".format(residence_times[par,'scale'][age_group]) + '\nLoc: '+ "{:.2f}".format(residence_times[par,'loc'][age_group]), transform=axes[idx].transAxes, fontsize=8)
axes[idx].set_title('Age group: ' + str(age_group), fontsize=12)
axes[idx].set_xlim([0,max_val])
fig.suptitle(par,fontsize=16)
plt.show()
plt.close()
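# Minimal, self-contained illustration (not part of the original analysis) of the Weibull
# fit used throughout this script; it only runs when the plot_fit flag above is enabled,
# so the analysis results are unaffected.
if plot_fit:
    _demo_sample = weibull_min.rvs(1.5, loc=0, scale=5.0, size=500, random_state=0)
    _demo_shape, _demo_loc, _demo_scale = weibull_min.fit(_demo_sample, floc=0)
    print('Demo Weibull fit: shape={:.2f}, loc={:.2f}, scale={:.2f}'.format(_demo_shape, _demo_loc, _demo_scale))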
#######################################################
## Load and format Sciensano hospital survey dataset ##
#######################################################
df = pd.read_csv('COVID19BE_CLINIC.csv')
n_orig = df.shape[0]
print('The original dataset contains ' + str(n_orig) + ' entries.')
# Drop the admission_data and discharge_data columns --> these are handled manually below
df=df.drop(columns=['admission_data','discharge_data'])
# Drop the columns with missing age
df.dropna(subset=['age'], inplace=True)
n_filtering_age = df.shape[0]
print(str(n_orig-n_filtering_age) + ' entries were removed because the age was missing.')
# The data can only be used by our model if the admission date, discharge date, status at discharge and ICU transfer are all known
df.dropna(subset=['dt_admission'], inplace=True)
df.dropna(subset=['dt_discharge'], inplace=True)
df.dropna(subset=['status_discharge'], inplace=True)
df.dropna(subset=['ICU_transfer'], inplace=True)
df.drop(df[df.status_discharge == 'Autre'].index, inplace=True)
df.drop(df[df.status_discharge == 'Inconnu'].index, inplace=True)
df.drop(df[df.status_discharge == 'Transfert'].index, inplace=True)
n_filtering_dates = df.shape[0]
print(str(n_filtering_age-n_filtering_dates) + ' entries were removed because the admission date, discharge date, status at discharge or ICU transfer was missing.')
# Convert dates to pd.datetimes
df['dt_admission'] = pd.to_datetime(df['dt_admission'])
df['dt_admission'] = df['dt_admission'].dt.date
df['dt_discharge'] = pd.to_datetime(df['dt_discharge'])
df['dt_discharge'] = df['dt_discharge'].dt.date
df['dt_onset'] = pd.to_datetime(df['dt_onset'])
df['dt_onset'] = df['dt_onset'].dt.date
df['dt_icu_transfer'] = pd.to_datetime(df['dt_icu_transfer'])
df['dt_icu_transfer'] = df['dt_icu_transfer'].dt.date
# Add column with the age classes
df['age_class'] = pd.cut(df.age, bins=age_classes)
# Remove the negative residence times
df.drop(df[((df['dt_discharge'] - df['dt_admission'])/datetime.timedelta(days=1)) < 0].index, inplace=True)
# Remove the negative admission to onset times
df.drop(df[((df['dt_admission'] - df['dt_onset'])/datetime.timedelta(days=1)) < 0].index, inplace=True)
# Remove all residence times larger than 180 days
df.drop(df[((df['dt_discharge'] - df['dt_admission'])/datetime.timedelta(days=1)) >= 180].index, inplace=True)
n_filtering_times = df.shape[0]
print(str(n_filtering_dates-n_filtering_times) + ' entries were removed because the residence time or onset-to-admission time was negative, or the residence time was 180 days or longer.')
# Drop retirement home patients from dataset
exclude_homes = True
if exclude_homes:
df.drop(df[df.Expo_retirement_home == 'Oui'].index, inplace=True)
n_filtering_homes = df.shape[0]
print(str(n_filtering_times-n_filtering_homes) + ' additional entries were removed because the patient came from a retirement home.')
# Print a summary of the filtering
print(str(n_orig-n_filtering_homes)+' entries were removed during filtering. '+str(n_filtering_homes)+' entries remained.')
else:
# Print a summary of the filtering
print(str(n_orig-n_filtering_times)+' entries were removed during filtering. '+str(n_filtering_times)+' entries remained.')
###################################################
## Compute fractions: c, m0, m0_{ICU} and m0_{C} ##
###################################################
quantiles = [25,75,2.5,97.5]
# ------------------------------------------------------
# Initialize dataframe for results and population totals
# ------------------------------------------------------
columns = [[],[]]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["parameter", "quantity"])
fractions = pd.DataFrame(index=age_classes, columns=columns)
averages = pd.DataFrame(index=['population'],columns=columns)
# -------------------------------------------
# Compute fraction parameters point estimates
# -------------------------------------------
# Sample size
fractions['total_sample_size','point estimate']=df.groupby(by='age_class').apply(lambda x: x.age.count())
# Hospitalization propensity
fractions['admission_propensity','point estimate']=df.groupby(by='age_class').apply(lambda x: x.age.count())/df.shape[0]
# Distribution cohort/icu
fractions['c','point estimate'] = df.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Non'].age.count()/x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count())
# Mortalities
fractions['m0','point estimate']=df.groupby(by='age_class').apply(
lambda x: x[( (x.status_discharge=='D'))].age.count()/
x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count())
fractions['m0_{ICU}','point estimate']= df.groupby(by='age_class').apply(
lambda x: x[((x.ICU_transfer=='Oui') & (x.status_discharge=='D'))].age.count()/
x[x.ICU_transfer.isin(['Oui'])].age.count())
fractions['m0_{C}','point estimate']= df.groupby(by='age_class').apply(
lambda x: x[((x.ICU_transfer=='Non') & (x.status_discharge=='D'))].age.count()/
x[x.ICU_transfer.isin(['Non'])].age.count())
# -----------------------------
# Bootstrap fraction parameters
# -----------------------------
subset_size = args.subset_size
n = args.number_iterations
# First initialize a numpy array for the results
# First axis: parameter: c, m0, m0_C, m0_ICU
# Second axis: age group
# Third axis: bootstrap sample
bootstrap_fractions_age = np.zeros([4, len(age_classes), n])
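# Every bootstrap iteration draws subset_size patients per age group, with replacement,
# and recomputes the four fractions on that resample.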
# Loop over parameters
for idx in range(4):
for jdx in range(n):
smpl = df.groupby(by='age_class').apply(lambda x: x.sample(n=subset_size,replace=True))
smpl=smpl.drop(columns='age_class')
if idx == 0:
bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Non'].age.count()/
x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count()).values
elif idx == 1:
bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[( (x.status_discharge=='D'))].age.count()/
x[x.ICU_transfer.isin(['Oui', 'Non'])].age.count()).values
elif idx == 2:
bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Non') & (x.status_discharge=='D'))].age.count()/
x[x.ICU_transfer.isin(['Non'])].age.count()).values
elif idx == 3:
bootstrap_fractions_age[idx,:,jdx] = smpl.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Oui') & (x.status_discharge=='D'))].age.count()/
x[x.ICU_transfer.isin(['Oui'])].age.count()).values
# Compute summary statistics
for idx,par in enumerate(['c', 'm0', 'm0_{C}', 'm0_{ICU}']):
    fractions[par,'bootstrap mean'] = np.mean(bootstrap_fractions_age[idx,:,:], axis=1)
fractions[par,'bootstrap median'] = np.median(bootstrap_fractions_age[idx,:,:], axis=1)
for quantile in quantiles:
fractions[par,'bootstrap Q'+str(quantile)] = np.quantile(bootstrap_fractions_age[idx,:,:], q=quantile/100, axis=1)
# Save raw samples as a .npy
with open(data_path+'sciensano_bootstrap_fractions.npy', 'wb') as f:
np.save(f,bootstrap_fractions_age)
# Compute population average/total point estimate
averages['total_sample_size','point estimate'] = fractions['total_sample_size','point estimate'].sum()
averages['admission_propensity', 'point estimate'] = sum(((fractions['total_sample_size','point estimate']*fractions['admission_propensity', 'point estimate']).values)/(np.ones(len(age_classes))*fractions['total_sample_size', 'point estimate'].sum()))
averages['c', 'point estimate'] = df[df.ICU_transfer=='Non'].age.count()/df[df.ICU_transfer.isin(['Oui', 'Non'])].age.count()
averages['m0', 'point estimate'] = df[((df.status_discharge=='D'))].age.count()/df[df.ICU_transfer.isin(['Oui', 'Non'])].age.count()
averages['m0_{ICU}', 'point estimate'] = df[((df.ICU_transfer=='Oui') & (df.status_discharge=='D'))].age.count()/df[df.ICU_transfer.isin(['Oui'])].age.count()
averages['m0_{C}', 'point estimate'] = df[((df.ICU_transfer=='Non') & (df.status_discharge=='D'))].age.count()/df[df.ICU_transfer.isin(['Non'])].age.count()
# Bootstrap total population
bootstrap_fractions = np.zeros([4, n])
# Loop over parameters
for idx in range(4):
for jdx in range(n):
smpl = df.sample(n=subset_size,replace=True)
if idx == 0:
bootstrap_fractions[idx,jdx] = smpl[smpl.ICU_transfer=='Non'].age.count()/smpl[smpl.ICU_transfer.isin(['Oui', 'Non'])].age.count()
elif idx == 1:
bootstrap_fractions[idx,jdx] = smpl[((smpl.status_discharge=='D'))].age.count()/smpl[smpl.ICU_transfer.isin(['Oui', 'Non'])].age.count()
elif idx == 2:
bootstrap_fractions[idx,jdx] = smpl[((smpl.ICU_transfer=='Non') & (smpl.status_discharge=='D'))].age.count()/smpl[smpl.ICU_transfer.isin(['Non'])].age.count()
elif idx == 3:
bootstrap_fractions[idx,jdx] = smpl[((smpl.ICU_transfer=='Oui') & (smpl.status_discharge=='D'))].age.count()/smpl[smpl.ICU_transfer.isin(['Oui'])].age.count()
# Compute summary statistics
for idx,par in enumerate(['c', 'm0', 'm0_{C}', 'm0_{ICU}']):
    averages[par,'bootstrap mean'] = np.mean(bootstrap_fractions[idx,:])
averages[par,'bootstrap median'] = np.median(bootstrap_fractions[idx,:])
for quantile in quantiles:
averages[par,'bootstrap Q'+str(quantile)] = np.quantile(bootstrap_fractions[idx,:], q=quantile/100)
# -------------------------------------------
# Perform Mann-Whitney U-tests on mortalities
# -------------------------------------------
# Difference in mortality, ICU vs. Cohort
# Boxplot
x = bootstrap_fractions[2,:]
y = bootstrap_fractions[3,:]
stat, p_tt = ttest_ind(x, y)
stat, p_mwu = mannwhitneyu(x, y)
fig, ax = plt.subplots(figsize=(8,6))
bp = ax.boxplot([x, y], positions=[1,2])
plt.setp(bp['medians'], color='k')
ax.set_ylabel('mortality (-)')
ax.set_ylim(0,1)
ax.set_xticklabels(['Cohort mortality (N={}) \n median = {:.2f} \n mean = {:.2f}'.format(len(x), np.median(x), np.mean(x)),
'ICU mortality (N={}) \n median = {:.2f} \n mean = {:.2f}'.format(len(y), np.median(y), np.mean(y))])
ax.set_title('Difference in overall mortality, \ntwo-sided t-test: p={:.2e} \nMann-Whitney U-test: p={:.2e}'.format(p_tt,p_mwu))
plt.savefig(fig_path+'SCIENSANO_test_mortalities.pdf', dpi=600, bbox_inches='tight',orientation='portrait', papertype='a4')
plt.close()
# -----------------------------------------------------------------
# Make a violin plot of mortalities in ICU and cohort per age group
# -----------------------------------------------------------------
data = []
for idx,age_class in enumerate(age_classes):
data.append(bootstrap_fractions_age[2,idx,:])
# Violin plot
fig,ax = plt.subplots(figsize=(12,4))
parts = ax.violinplot(
data, positions=range(1,len(age_classes)+1), vert=False,showmeans=False, showmedians=False,
showextrema=False)
for idx,pc in enumerate(parts['bodies']):
pc.set_facecolor(colorscale_okabe_ito['green'])
pc.set_edgecolor('black')
pc.set_alpha(1)
quartiles = [25, 50, 75]
quartile1 = np.zeros(len(data))
medians = np.zeros(len(data))
quartile3 = np.zeros(len(data))
for i,x in enumerate(data):
quartile1[i],medians[i],quartile3[i] = np.percentile(x, quartiles)
whiskers = np.array([
adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = np.arange(1, len(medians)+1)
ax.scatter( medians, inds, marker='o', color='white', s=30, zorder=3)
ax.hlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
ax.hlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
data = []
for idx,age_class in enumerate(age_classes):
data.append(bootstrap_fractions_age[3,idx,:])
parts = ax.violinplot(
data, positions=range(1,len(age_classes)+1), vert=False,showmeans=False, showmedians=False,
showextrema=False)
for idx,pc in enumerate(parts['bodies']):
pc.set_facecolor(colorscale_okabe_ito['red'])
pc.set_edgecolor('black')
pc.set_alpha(1)
quartiles = [25, 50, 75]
quartile1 = np.zeros(len(data))
medians = np.zeros(len(data))
quartile3 = np.zeros(len(data))
for i,x in enumerate(data):
quartile1[i],medians[i],quartile3[i] = np.percentile(x, quartiles)
whiskers = np.array([
adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = np.arange(1, len(medians)+1)
ax.scatter( medians, inds, marker='o', color='white', s=30, zorder=3)
ax.hlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
ax.hlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
ax.set_xlabel('mortality (-)')
ax.set_xlim(0,1)
ax.set_ylim(0,len(age_classes)+1)
ax.set_yticks(inds)
ax.set_yticklabels(age_classes.values,fontsize=10)
plt.tight_layout()
plt.savefig(fig_path+'SCIENSANO_violin_mortalities.pdf', dpi=300, bbox_inches='tight',orientation='portrait', papertype='a4')
plt.close()
# Concatenate dataframes
fractions = pd.concat([fractions, averages])
###################################################################################
## Compute residence times: d_{hospital}, d_{C,R}, d_{C,D}, d_{ICU,R}, d_{ICU,D} ##
###################################################################################
# --------------------------------
# Initialize dataframe for results
# --------------------------------
columns = [[],[]]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["parameter", "quantity"])
residence_times = pd.DataFrame(index=age_classes, columns=columns)
samples = pd.DataFrame(index=age_classes, columns=[])
samples_total = pd.DataFrame(index=['total'], columns=[])
# ----------
# d_hospital
# ----------
# Summary statistics
residence_times['d_hospital','mean'] = df.groupby(by='age_class').apply(lambda x: (x['dt_admission'] - x['dt_onset']).mean()/datetime.timedelta(days=1))
residence_times['d_hospital','median'] = df.groupby(by='age_class').apply(lambda x: (x['dt_admission'] - x['dt_onset']).median()/datetime.timedelta(days=1))
for quantile in quantiles:
residence_times['d_hospital','Q'+str(quantile)] = df.groupby(by='age_class').apply(lambda x: (x['dt_admission'] - x['dt_onset']).quantile(q=quantile/100)/datetime.timedelta(days=1))
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: (x['dt_admission'] - x['dt_onset'])/datetime.timedelta(days=1))
residence_times['d_hospital','sample_size'], residence_times['d_hospital','shape'],residence_times['d_hospital','loc'],residence_times['d_hospital','scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'d_hospital',30)
# ----------------------------
# Transfer time cohort --> ICU
# ----------------------------
# Days in cohort before ICU transfer
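# d_transfer is only defined for patients with an ICU transfer and a valid transfer date;
# values outside [0, 21] days are discarded, and exact zeros are stored as 0.01 days in the
# pooled list (values) so they remain usable in the Weibull fit.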
df['d_transfer'] = np.nan
values=[]
for i in range(len(df['d_transfer'])):
if ((df['ICU_transfer'].iloc[i] == 'Oui') & (not pd.isnull(df['dt_icu_transfer'].iloc[i]))):
val = (df['dt_icu_transfer'].iloc[i] - df['dt_admission'].iloc[i])/datetime.timedelta(days=1)
if ((val >= 0) & (val <= 21)):
df['d_transfer'].iloc[i] = val
if val == 0:
values.append(0.010)
else:
values.append(val)
values_d_transfer = values
# Summary statistics
residence_times['d_transfer','mean'] = df.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Oui'].d_transfer.mean())
residence_times['d_transfer','median'] = df.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Oui'].d_transfer.median())
for quantile in quantiles:
residence_times['d_transfer','Q'+str(quantile)] = df.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Oui'].d_transfer.quantile(q=quantile/100))
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: x[x.ICU_transfer=='Oui'].d_transfer)
residence_times['d_transfer','sample_size'], residence_times['d_transfer','shape'], residence_times['d_transfer','loc'], residence_times['d_transfer', 'scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'d_transfer',30)
# Append samples
samples['d_transfer'] = df.groupby(by='age_class').d_transfer.agg(lambda x: list(x.dropna()))
samples_total['d_transfer'] = [df.d_transfer.agg(lambda x: list(x.dropna()))]
# --------
# dICU,rec
# --------
cutoff = 60
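# dICUrec is the post-ICU recovery period spent back in cohort: hospital discharge date
# minus (ICU transfer date + ICU length of stay), retained only for recovered patients and
# only when the result lies within [0, cutoff] days.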
df['dICUrec'] = np.nan
values=[]
for i in range(len(df['d_transfer'])):
if ((df['ICU_transfer'].iloc[i] == 'Oui') & (not pd.isnull(df['dt_icu_transfer'].iloc[i])) & (df['status_discharge'].iloc[i] == 'R') & (not pd.isnull(df['length_stay_ICU'].iloc[i]))):
val = (df['dt_discharge'].iloc[i] - (df['dt_icu_transfer'].iloc[i] + datetime.timedelta(days=df['length_stay_ICU'].iloc[i])))/datetime.timedelta(days=1)
if ((val >= 0) & (val <= cutoff)):
df['dICUrec'].iloc[i] = val
# Summary statistics
residence_times['dICUrec','mean'] = df.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))].dICUrec.mean())
residence_times['dICUrec','median'] = df.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))].dICUrec.median())
for quantile in quantiles:
residence_times['dICUrec','Q'+str(quantile)] = df.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))].dICUrec.quantile(q=quantile/100))
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: x[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))].dICUrec)
residence_times['dICUrec','sample_size'], residence_times['dICUrec','shape'], residence_times['dICUrec','loc'], residence_times['dICUrec', 'scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'dICUrec',cutoff)
# ---
# dC
# ---
# Summary statistics
residence_times['dC','mean']=df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][x.ICU_transfer=='Non'] - x['dt_admission'][x.ICU_transfer=='Non'])/datetime.timedelta(days=1)).mean())
residence_times['dC','median']=df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][x.ICU_transfer=='Non'] - x['dt_admission'][x.ICU_transfer=='Non'])/datetime.timedelta(days=1)).median())
for quantile in quantiles:
residence_times['dC','Q'+str(quantile)]=df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][x.ICU_transfer=='Non'] - x['dt_admission'][x.ICU_transfer=='Non'])/datetime.timedelta(days=1)).quantile(q=quantile/100))
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][x.ICU_transfer=='Non'] - x['dt_admission'][x.ICU_transfer=='Non'])/datetime.timedelta(days=1)))
residence_times['dC','sample_size'], residence_times['dC','shape'],residence_times['dC','loc'],residence_times['dC','scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'dC',90)
# Append samples
samples['dC'] = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][x.ICU_transfer=='Non'] - x['dt_admission'][x.ICU_transfer=='Non'])/datetime.timedelta(days=1))).groupby(by='age_class').agg(lambda x: list(x))
samples_total['dC'] = [df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][x.ICU_transfer=='Non'] - x['dt_admission'][x.ICU_transfer=='Non'])/datetime.timedelta(days=1))).agg(lambda x: list(x))]
# -----
# dC_R
# -----
# Summary statistics
residence_times['dC_R', 'mean']= df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))])/datetime.timedelta(days=1)).mean())
residence_times['dC_R', 'median']= df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))])/datetime.timedelta(days=1)).median())
for quantile in quantiles:
residence_times['dC_R', 'Q'+str(quantile)] = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))])/datetime.timedelta(days=1)).quantile(q=quantile/100))
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))])/datetime.timedelta(days=1)))
residence_times['dC_R','sample_size'], residence_times['dC_R','shape'],residence_times['dC_R','loc'],residence_times['dC_R','scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'dC_R',90)
# Append samples
samples['dC_R'] = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))])/datetime.timedelta(days=1))).groupby(by='age_class').agg(lambda x: list(x))
samples_total['dC_R'] = [df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='R'))])/datetime.timedelta(days=1))).agg(lambda x: list(x))]
# -----
# dC_D
# -----
df['dt_discharge'] = pd.to_datetime(df['dt_discharge'])
df['dt_admission'] = pd.to_datetime(df['dt_admission'])
# Summary statistics
residence_times['dC_D', 'mean']=df.groupby(by='age_class').apply(lambda x: ((pd.to_datetime(x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))]) - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)).mean()).fillna(1)
residence_times['dC_D', 'median']=df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))])/datetime.timedelta(days=1)).median()).fillna(1)
for quantile in quantiles:
residence_times['dC_D', 'Q'+str(quantile)]=df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))])/datetime.timedelta(days=1)).quantile(q=quantile/100)).fillna(1)
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))])/datetime.timedelta(days=1)))
sample_size, shape, loc, scale = fit_weibull(v)
if args.age_stratification_size == 3:
append_idx = 1
elif args.age_stratification_size == 9:
append_idx = 2
elif args.age_stratification_size == 10:
append_idx = 2
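# The fit only returns parameters for age groups that actually contain cohort deaths;
# placeholder values (sample size 0, shape 1, loc 0, scale 1) are prepended for the
# youngest group(s), which presumably lack such patients.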
for i in range(append_idx):
sample_size.insert(0,0)
shape.insert(0,1)
loc.insert(0,0)
scale.insert(0,1)
residence_times['dC_D','sample_size'], residence_times['dC_D','shape'],residence_times['dC_D','loc'],residence_times['dC_D','scale'] = sample_size, shape, loc, scale
if plot_fit:
plot_weibull_fit(v,'dC_D',90)
# Append samples
samples['dC_D'] = df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))])/datetime.timedelta(days=1))).groupby(by='age_class').agg(lambda x: list(x))
samples_total['dC_D'] = [df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))] - x['dt_admission'][((x.ICU_transfer=='Non')&(x.status_discharge=='D'))])/datetime.timedelta(days=1))).agg(lambda x: list(x))]
samples['dC_D'].loc[residence_times.index.get_level_values(0).unique().values[0]] = [1]
samples['dC_D'].loc[residence_times.index.get_level_values(0).unique().values[1]] = [1]
# -------
# dICU_R
# -------
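# dICU_R is the ICU residence time of recovered patients: total hospital stay minus the
# pre-ICU transfer delay (d_transfer) and the post-ICU recovery period (dICUrec).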
# Summary statistics
residence_times['dICU_R','mean']=df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x.dICUrec[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]).mean())
residence_times['dICU_R','median']=df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x.dICUrec[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]).median())
for quantile in quantiles:
residence_times['dICU_R','Q'+str(quantile)]=df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x.dICUrec[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]).quantile(q=quantile/100))
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x.dICUrec[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]))
residence_times['dICU_R','sample_size'], residence_times['dICU_R','shape'],residence_times['dICU_R','loc'],residence_times['dICU_R','scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'dICU_R',90)
# Append samples
samples['dICU_R'] =df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x.dICUrec[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))])).groupby(by='age_class').agg(lambda x: list(x))
samples_total['dICU_R'] = [df.groupby(by='age_class').apply(lambda x: ((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))])/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))] - x.dICUrec[((x.ICU_transfer=='Oui')&(x.status_discharge=='R'))]).agg(lambda x: list(x))]
# -------
# dICU_D
# -------
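# dICU_D is the ICU residence time of deceased patients: total hospital stay minus the
# pre-ICU transfer delay; no recovery period is subtracted because these patients did not recover.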
# Summary statistics
residence_times['dICU_D','mean']=df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]).mean()).fillna(1)
residence_times['dICU_D','median']=df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]).median()).fillna(1)
for quantile in quantiles:
residence_times['dICU_D','Q'+str(quantile)]=df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]).quantile(q=quantile/100)).fillna(1)
# Weibull fit
v = df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))
sample_size, shape, loc, scale = fit_weibull(v)
if args.age_stratification_size == 3:
append_idx = 0
elif args.age_stratification_size == 9:
append_idx = 1
elif args.age_stratification_size == 10:
append_idx = 1
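# Same placeholder handling as for dC_D: the youngest age group(s) presumably contain no
# ICU deaths, so default parameters are prepended.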
for i in range(append_idx):
sample_size.insert(0,0)
shape.insert(0,1)
loc.insert(0,0)
scale.insert(0,1)
residence_times['dICU_D','sample_size'], residence_times['dICU_D','shape'],residence_times['dICU_D','loc'],residence_times['dICU_D','scale'] = sample_size, shape, loc, scale
if plot_fit:
plot_weibull_fit(v,'dICU_D',90)
# Append samples
samples['dICU_D'] = df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))])).groupby(by='age_class').agg(lambda x: list(x))
samples_total['dICU_D'] = [df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))] - pd.to_datetime(x['dt_admission'][((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))]))/datetime.timedelta(days=1)) - x.d_transfer[((x.ICU_transfer=='Oui')&(x.status_discharge=='D'))])).agg(lambda x: list(x))]
samples['dICU_D'].loc[residence_times.index.get_level_values(0).unique().values[0]] = [1]
#------
# dICU
# -----
# Add dICU_R and dICU_D together to compute parameters of dICU
samples['dICU'] = samples['dICU_R'] + samples['dICU_D']
# Summary statistics
residence_times['dICU','mean'] = np.nan
residence_times['dICU','median'] = np.nan
for quantile in quantiles:
residence_times['dICU','Q'+str(quantile)] = np.nan
for idx,age_group in enumerate(samples['dICU'].index.get_level_values(0).unique().values):
residence_times['dICU','mean'].loc[age_group] = np.nanmean(samples['dICU'][age_group])
residence_times['dICU','median'].loc[age_group] = np.nanmedian(samples['dICU'][age_group])
for quantile in quantiles:
residence_times['dICU','Q'+str(quantile)].loc[age_group] = np.nanquantile(samples['dICU'][age_group],q=quantile/100)
# Weibull fit
v = samples['dICU']#df.groupby(by='age_class').apply(lambda x: (((x['dt_discharge'][x.ICU_transfer=='Oui'] - pd.to_datetime(x['dt_admission'][x.ICU_transfer=='Oui']))/datetime.timedelta(days=1)) - x.d_transfer[x.ICU_transfer=='Oui']))
residence_times['dICU','sample_size'], residence_times['dICU','shape'],residence_times['dICU','loc'],residence_times['dICU','scale'] = fit_weibull(v)
if plot_fit:
plot_weibull_fit(v,'dICU',90)
# Append samples
samples_total['dICU'] = ''
samples_total['dICU'] = samples_total['dICU'].apply(list)
total_list=[]
for idx,age_group in enumerate(samples['dICU'].index.get_level_values(0).unique().values):
total_list.extend(samples['dICU'][age_group])
samples_total['dICU']['total'] = total_list
samples = pd.concat([samples, samples_total])
#################################
## Compute averages and totals ##
#################################
columns = [[],[]]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["parameter", "quantity"])
averages = pd.DataFrame(index=['averages'], columns=columns)
# ---
# dC
# ---
# Summary statistics
averages['dC','mean'] = ((df['dt_discharge'][df.ICU_transfer=='Non'] - df['dt_admission'][df.ICU_transfer=='Non'])/datetime.timedelta(days=1)).mean()
averages['dC','median'] = ((df['dt_discharge'][df.ICU_transfer=='Non'] - df['dt_admission'][df.ICU_transfer=='Non'])/datetime.timedelta(days=1)).median()
for quantile in quantiles:
averages['dC','Q'+str(quantile)] = ((df['dt_discharge'][df.ICU_transfer=='Non'] - df['dt_admission'][df.ICU_transfer=='Non'])/datetime.timedelta(days=1)).quantile(q=quantile/100)
# Gamma fit
v = ((df['dt_discharge'][df.ICU_transfer=='Non'] - df['dt_admission'][df.ICU_transfer=='Non'])/datetime.timedelta(days=1))
v[v==0] = 0.01
averages['dC','sample_size'] = len(v)
averages['dC','shape'],averages['dC','loc'],averages['dC','scale'] = gamma.fit(v, floc=0)
# ----
# dC,R
# ----
# Summary statistics
averages['dC_R','mean'] = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))])/datetime.timedelta(days=1)).mean()
averages['dC_R','median'] = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))])/datetime.timedelta(days=1)).median()
for quantile in quantiles:
averages['dC_R','Q'+str(quantile)] = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))])/datetime.timedelta(days=1)).quantile(q=quantile/100)
# Gamma fit
v = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='R'))])/datetime.timedelta(days=1))
v[v==0] = 0.01
averages['dC_R','sample_size'] = len(v)
averages['dC_R','shape'],averages['dC_R','loc'],averages['dC_R','scale'] = gamma.fit(v, floc=0)
# ----
# dC,D
# ----
# Summary statistics
averages['dC_D','mean'] = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))])/datetime.timedelta(days=1)).mean()
averages['dC_D','median'] = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))])/datetime.timedelta(days=1)).median()
for quantile in quantiles:
averages['dC_D','Q'+str(quantile)] = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))])/datetime.timedelta(days=1)).quantile(q=quantile/100)
# Gamma fit
v = ((df['dt_discharge'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Non')&(df.status_discharge=='D'))])/datetime.timedelta(days=1))
v[v==0] = 0.01
averages['dC_D','sample_size'] = len(v)
averages['dC_D','shape'],averages['dC_D','loc'],averages['dC_D','scale'] = gamma.fit(v, floc=0)
# ------
# dICU,R
# ------
# Summary statistics
averages['dICU_R','mean'] = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))] - df['dICUrec'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))]
).mean()
averages['dICU_R','median'] = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))] - df['dICUrec'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))]
).median()
for quantile in quantiles:
averages['dICU_R','Q'+str(quantile)] = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))] - df['dICUrec'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))]
).quantile(q=quantile/100)
# Gamma fit
v = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='R'))])/datetime.timedelta(days=1)- df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))] - df['dICUrec'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='R'))])
v[v==0] = 0.01
v = [x for x in v if (math.isnan(x) == False)]
v = [x for x in v if (x > 0)]
averages['dICU_R','sample_size'] = len(v)
averages['dICU_R','shape'],averages['dICU_R','loc'],averages['dICU_R','scale'] = gamma.fit(v, floc=0)
# ------
# dICU,D
# ------
# Summary statistics
averages['dICU_D','mean'] = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='D'))]).mean()
averages['dICU_D','median'] = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='D'))]).median()
for quantile in quantiles:
averages['dICU_D','Q'+str(quantile)] = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='D'))]).quantile(q=quantile/100)
# Gamma fit
v = ((df['dt_discharge'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))] - df['dt_admission'][((df.ICU_transfer=='Oui')&(df.status_discharge=='D'))])/datetime.timedelta(days=1) - df['d_transfer'][((df['ICU_transfer']=='Oui')&(df['status_discharge']=='D'))])
v[v==0] = 0.01
v = [x for x in v if (math.isnan(x) == False)]
v = [x for x in v if (x > 0)]
averages['dICU_D','sample_size'] = len(v)
averages['dICU_D','shape'],averages['dICU_D','loc'],averages['dICU_D','scale'] = gamma.fit(v, floc=0)
# ----
# dICU
# ----
# Summary statistics
averages['dICU','mean'] = np.nanmean(samples_total['dICU'][0])
averages['dICU','median'] = np.nanmedian(samples_total['dICU'][0])#((df['dt_discharge'][df.ICU_transfer=='Oui'] - df['dt_admission'][df.ICU_transfer=='Oui'])/datetime.timedelta(days=1)- df['d_transfer'][df['ICU_transfer']=='Oui']).median()
for quantile in quantiles:
averages['dICU','Q'+str(quantile)] = np.nanquantile(samples_total['dICU'][0],q=quantile/100)#((df['dt_discharge'][df.ICU_transfer=='Oui'] - df['dt_admission'][df.ICU_transfer=='Oui'])/datetime.timedelta(days=1)- df['d_transfer'][df['ICU_transfer']=='Oui']).quantile(q=quantile/100)
# Gamma fit
v = samples_total['dICU'][0]#((df['dt_discharge'][df.ICU_transfer=='Oui'] - df['dt_admission'][df.ICU_transfer=='Oui'])/datetime.timedelta(days=1)- df['d_transfer'][df['ICU_transfer']=='Oui'])
v[(v==0)] = 0.01
v = [x for x in v if (math.isnan(x) == False)]
v = [x for x in v if (x > 0)]
averages['dICU','sample_size'] = len(v)
averages['dICU','shape'],averages['dICU','loc'],averages['dICU','scale'] = gamma.fit(v, floc=0)
# --------
# dICU,rec
# --------
averages['dICUrec','mean'] = df['dICUrec'].mean()
averages['dICUrec','median'] = df['dICUrec'].median()
for quantile in quantiles:
averages['dICUrec','Q'+str(quantile)] = df['dICUrec'].quantile(q=quantile/100)
v = df['dICUrec']
v = [x for x in v if (math.isnan(x) == False)]
v = [x for x in v if (x > 0)]
averages['dICUrec','sample_size'] = len(v)
averages['dICUrec','shape'],averages['dICUrec','loc'],averages['dICUrec','scale'] = gamma.fit(v, floc=0)
# ----------
# d_transfer
# ----------
averages['d_transfer','mean'] = np.mean(values_d_transfer)
averages['d_transfer','median'] = np.median(values_d_transfer)
for quantile in quantiles:
averages['d_transfer','Q'+str(quantile)] = np.quantile(values_d_transfer,q=quantile/100)
averages['d_transfer','shape'], averages['d_transfer','loc'], averages['d_transfer', 'scale'] = gamma.fit(values, floc=0)
averages['d_transfer','sample_size'] = len(values)
# ----------
# d_hospital
# ----------
df['dt_onset'] = | pd.to_datetime(df['dt_onset']) | pandas.to_datetime |
import os
import pandas
def getProfInfo(ProfFile):
f=open(ProfFile)
lines=f.readlines()
f.close()
return lines
curDirect=os.getcwd()
os.chdir(curDirect+"/Data")
UniFiles=iter(os.listdir(curDirect+"/Data"))
data={'Name':[],'Profile Link':[],'Department Website':[],'E-mail':[],'Interests':[]}
for unifile in UniFiles:
os.chdir(curDirect+"/Data/"+unifile)
ProfFiles=iter(os.listdir(curDirect+"/Data/"+unifile))
for prof in ProfFiles:
info=getProfInfo(prof)
print("Scanning:"+unifile+"/"+prof)
# if len(info)<4:
# for i in range(4-len(info)):
# info.append('')
data['Name'].append(info[0][:-1])
data['Profile Link'].append(info[1][:-1])
data['Department Website'].append(info[2][:-1])
data['E-mail'].append(info[3][:-1])
data['Interests'].append(info[4][:-1])
df= | pandas.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from ini.ini import *
from constant.constant import *
import time
import pickle
import keras as ks  # required by the model-building methods below
class Deep_Learning:
def __init__(self,env):
self.__env = env
self.__model = None
pass
def get_env(self):
return self.__env
def set_env(self,env):
self.__env = env
def get_model(self):
return self.__model
def set_model(self, model):
self.__model = model
def build_net_blstm(self):
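        # Bidirectional LSTM over sequences of 11 time steps with 10 features each, followed
        # by dropout and dense layers (256 -> 64 -> 1 regression output); SGD optimiser, MSE loss.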
model = ks.Sequential()
model.add(
ks.layers.Bidirectional(ks.layers.LSTM(
50
),input_shape=(11,10))
)
model.add(
ks.layers.Dropout(0.01)
)
model.add(ks.layers.Dense(256))
model.add(
ks.layers.Dropout(0.01)
)
model.add(ks.layers.Dense(64))
model.add(ks.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
model.summary()
self.set_model(model)
def build_net_cnn(self):
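        # 2-D CNN on 11 x 50 single-channel inputs: two convolutional layers, max-pooling,
        # dropout, then dense layers (256 -> 64 -> 1 regression output); SGD optimiser, MSE loss.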
model = ks.Sequential()
model.add(
ks.layers.Conv2D(
256,
kernel_size=(5, 5),
strides=(1, 1),
activation='relu',
input_shape=(11,50,1)
)
)
# model.add(
# ks.layers.MaxPooling2D(
# pool_size=(2, 2),
# strides=(2, 2)
# )
# )
model.add(
ks.layers.Conv2D(
256,
kernel_size=(3, 3),
strides=(1, 1),
activation='relu',
)
)
model.add(
ks.layers.MaxPooling2D(
pool_size=(2, 2),
strides=(2, 2)
)
)
model.add(
ks.layers.Dropout(0.01)
)
model.add(ks.layers.Flatten())
model.add(ks.layers.Dense(256))
model.add(
ks.layers.Dropout(0.01)
)
model.add(ks.layers.Dense(64))
model.add(ks.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
model.summary()
self.set_model(model)
def train(self,islocal=False):
if islocal:
res = pd.read_csv(os.path.join(RESULTS, Y_HAT + '2.csv'))
else:
model = self.get_model()
date_list = os.listdir(os.path.join(RESULTS,'train'))
res = pd.DataFrame()
for date in date_list:
with open(os.path.join(RESULTS,'train',date),'rb') as f:
data = pickle.load(f)
model.fit(
np.array(data['train']).reshape((len(data['train']), 11, 10)),
np.array(data['label']),
batch_size=1024,
epochs=400,
)
data_cell = | pd.DataFrame(columns=[COM_SEC, COM_DATE, Y_HAT]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: LeeZChuan
"""
import pandas as pd
import numpy as np
import requests
import os
from pandas.core.frame import DataFrame
import json
import datetime
import time
pd.set_option('display.max_columns',1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth',1000)
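# Truncate a raw address at its first administrative or road suffix (town, farm, subdistrict,
# road, avenue, street or village) so that trips can later be grouped by coarse
# origin/destination area.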
def addressProcess(address):
result = address
if '镇' in address:
item = address.split('镇')
result = item[0]+'镇'
elif '农场' in address:
item = address.split('农场')
result = item[0]+'农场'
elif '街道' in address:
item = address.split('街道')
result = item[0]+'街道'
elif '路' in address:
item = address.split('路')
result = item[0]+'路'
elif '大道' in address:
item = address.split('大道')
result = item[0]+'大道'
elif '街' in address:
item = address.split('街')
result = item[0]+'街'
elif '村' in address:
item = address.split('村')
result = item[0]+'村'
return result
def processJson(filePath):
    orderNum = 0  # number of orders
    with open(filePath, 'r', encoding="utf-8") as f:
        # Read all lines; each line is a single string
i = 0
for jsonstr in f.readlines():
list_address = []
list_name = []
jsonstr = jsonstr[1:-1]
# listValue = jsonstr.split(']];,')
listValue = jsonstr.split(']],')
for listitem in listValue:
listitem = listitem[1:]
listCon = listitem.split(',[')
listAddr = listCon[3][:-1].split(',')
if len(listAddr) == 2 and '海南省海口市' in listAddr[0] and '海南省海口市' in listAddr[1]:
list_address_each = []
startAdd = addressProcess(listAddr[0][6:])
endAdd = addressProcess(listAddr[1][6:])
if startAdd != endAdd:
list_address_each.append(startAdd)
list_address_each.append(endAdd)
list_address.append(list_address_each)
list_name.append(startAdd)
list_name.append(endAdd)
pd_list_address = pd.DataFrame(list_name)
# print (pd_list_address)
name_list_count = pd.value_counts(pd_list_address[0], sort=False)
name_df = pd_list_address[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list if name_list_count[name] > 300]
name_list_new = []
for item in name_list_all:
name_list_new.append(item[0])
print (name_list_new)
new_list_address = []
for item in list_address:
if item[0] in name_list_new and item[1] in name_list_new:
new_list = []
new_list.append(item[0])
new_list.append(item[1])
new_list_address.append(new_list)
orderNum += 1
return orderNum, list_address
def save(filename, contents):
fh = open(filename, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def dataSta(list_address, txtname):
raw_file_df = | pd.DataFrame(list_address) | pandas.DataFrame |
# Fundamental libraries
import os
import re
import sys
import time
import glob
import random
import datetime
import warnings
import itertools
import numpy as np
import pandas as pd
import pickle as cp
import seaborn as sns
import multiprocessing
from scipy import stats
from pathlib import Path
from ast import literal_eval
import matplotlib.pyplot as plt
from collections import Counter
from scipy.special import logit
from argparse import ArgumentParser
from pandas.api.types import CategoricalDtype
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
warnings.filterwarnings(action="ignore")
# SciKit-Learn methods
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve
from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer, OneHotEncoder, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils import resample
from sklearn.utils.class_weight import compute_class_weight
# StatsModel methods
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.miscmodels.ordinal_model import OrderedModel
from statsmodels.discrete.discrete_model import Logit
from statsmodels.tools.tools import add_constant
# TQDM for progress tracking
from tqdm import tqdm
# Function to load and compile test prediction files
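# pred_file_info is expected to hold one row per prediction file, with columns 'file',
# 'repeat' and 'fold'; every loaded prediction is tagged with its repeat and fold.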
def collect_preds(pred_file_info,progress_bar = True, progress_bar_desc = ''):
output_df = []
if progress_bar:
iterator = tqdm(range(pred_file_info.shape[0]),desc=progress_bar_desc)
else:
iterator = range(pred_file_info.shape[0])
for i in iterator:
curr_pred = pd.read_csv(pred_file_info.file[i])
curr_pred['repeat'] = pred_file_info.repeat[i]
curr_pred['fold'] = pred_file_info.fold[i]
output_df.append(curr_pred)
return pd.concat(output_df,ignore_index=True)
# Function to load and compile test performance metrics for DeepIMPACT models
def collect_metrics(metric_file_info,progress_bar = True, progress_bar_desc = ''):
output_df = []
if progress_bar:
iterator = tqdm(metric_file_info.file,desc=progress_bar_desc)
else:
iterator = metric_file_info.file
return pd.concat([pd.read_csv(f) for f in iterator],ignore_index=True)
# Function to calculate ordinal c-index via bootstrapping
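# The ORC is the unweighted mean of the pairwise AUCs over all ordered class pairs;
# S rescales (1 - ORC) by the number of class pairs (the 'steps' metric returned alongside it).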
def calc_bs_ORC(curr_resamples, compiled_test_preds, progress_bar = True, progress_bar_desc = ''):
if progress_bar:
iterator = tqdm(range(curr_resamples.shape[0]),desc=progress_bar_desc)
else:
iterator = range(curr_resamples.shape[0])
num_classes = len(compiled_test_preds.TrueLabel.unique())
compiled_orc = []
compiled_steps = []
compiled_rs_idx = []
for curr_rs_row in iterator:
compiled_rs_idx.append(curr_resamples.RESAMPLE_IDX[curr_rs_row])
curr_in_sample = curr_resamples.GUPIs[curr_rs_row]
curr_rs_preds = compiled_test_preds[compiled_test_preds.GUPI.isin(curr_in_sample)].reset_index(drop=True)
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE=')]
pairs = []
aucs = []
for ix, (a, b) in enumerate(itertools.combinations(np.sort(curr_rs_preds.TrueLabel.unique()), 2)):
filt_rs_preds = curr_rs_preds[curr_rs_preds.TrueLabel.isin([a,b])].reset_index(drop=True)
filt_rs_preds['ConditProb'] = filt_rs_preds[prob_cols[b]]/(filt_rs_preds[prob_cols[a]] + filt_rs_preds[prob_cols[b]])
filt_rs_preds['ConditLabel'] = (filt_rs_preds.TrueLabel == b).astype(int)
aucs.append(roc_auc_score(filt_rs_preds['ConditLabel'],filt_rs_preds['ConditProb']))
pairs.append((a,b))
compiled_orc.append(np.mean(aucs))
compiled_steps.append((1 - np.mean(aucs))*(num_classes*(num_classes-1)/2))
return pd.DataFrame({'RESAMPLE_IDX':compiled_rs_idx,'ORC':compiled_orc,'S':compiled_steps})
# Function to calculate generalised c-index via bootstrapping
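# The generalised c-index weights each pairwise AUC by the number of comparable patient
# pairs for that class pair; D_xy = 2*C - 1 is the corresponding Somers' Dxy.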
def calc_bs_gen_c(curr_resamples, compiled_test_preds, progress_bar = True, progress_bar_desc = ''):
if progress_bar:
iterator = tqdm(range(curr_resamples.shape[0]),desc=progress_bar_desc)
else:
iterator = range(curr_resamples.shape[0])
num_classes = len(compiled_test_preds.TrueLabel.unique())
compiled_gen_c = []
compiled_D = []
compiled_rs_idx = []
for curr_rs_row in iterator:
compiled_rs_idx.append(curr_resamples.RESAMPLE_IDX[curr_rs_row])
curr_in_sample = curr_resamples.GUPIs[curr_rs_row]
curr_rs_preds = compiled_test_preds[compiled_test_preds.GUPI.isin(curr_in_sample)].reset_index(drop=True)
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE=')]
pairs = []
aucs = []
prevalence = []
for ix, (a, b) in enumerate(itertools.combinations(np.sort(curr_rs_preds.TrueLabel.unique()), 2)):
filt_rs_preds = curr_rs_preds[curr_rs_preds.TrueLabel.isin([a,b])].reset_index(drop=True)
filt_rs_preds['ConditProb'] = filt_rs_preds[prob_cols[b]]/(filt_rs_preds[prob_cols[a]] + filt_rs_preds[prob_cols[b]])
filt_rs_preds['ConditLabel'] = (filt_rs_preds.TrueLabel == b).astype(int)
prevalence.append((filt_rs_preds.TrueLabel == a).sum()*(filt_rs_preds.TrueLabel == b).sum())
aucs.append(roc_auc_score(filt_rs_preds['ConditLabel'],filt_rs_preds['ConditProb']))
pairs.append((a,b))
compiled_gen_c.append(np.sum(np.multiply(aucs,prevalence))/np.sum(prevalence))
compiled_D.append(2*(np.sum(np.multiply(aucs,prevalence))/np.sum(prevalence))-1)
return pd.DataFrame({'RESAMPLE_IDX':compiled_rs_idx,'Gen_C':compiled_gen_c,'D_xy':compiled_D})
# Function to calculate threshold-level AUROCs
def calc_bs_thresh_AUC(curr_resamples, compiled_test_preds, progress_bar = True, progress_bar_desc = ''):
thresh_labels = ['GOSE>1','GOSE>3','GOSE>4','GOSE>5','GOSE>6','GOSE>7']
compiled_AUCs = []
if progress_bar:
iterator = tqdm(range(curr_resamples.shape[0]),desc=progress_bar_desc)
else:
iterator = range(curr_resamples.shape[0])
for curr_rs_row in iterator:
curr_in_sample = curr_resamples.GUPIs[curr_rs_row]
curr_rs_preds = compiled_test_preds[compiled_test_preds.GUPI.isin(curr_in_sample)].reset_index(drop=True)
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE=')]
for thresh in range(1,len(prob_cols)):
cols_gt = prob_cols[thresh:]
prob_gt = curr_rs_preds[cols_gt].sum(1).values
gt = (curr_rs_preds['TrueLabel'] >= thresh).astype(int).values
curr_AUC = roc_auc_score(gt, prob_gt)
compiled_AUCs.append(pd.DataFrame({'RESAMPLE_IDX':[curr_resamples.RESAMPLE_IDX[curr_rs_row]],'Threshold':thresh_labels[thresh-1],'AUC':curr_AUC},index=[0]))
return pd.concat(compiled_AUCs,ignore_index = True)
# Function to calculate normalized confusion matrices
def calc_bs_cm(curr_resamples, compiled_test_preds, progress_bar = True, progress_bar_desc = ''):
compiled_cm = []
if progress_bar:
iterator = tqdm(range(curr_resamples.shape[0]),desc=progress_bar_desc)
else:
iterator = range(curr_resamples.shape[0])
for curr_rs_row in iterator:
curr_in_sample = curr_resamples.GUPIs[curr_rs_row]
curr_rs_preds = compiled_test_preds[compiled_test_preds.GUPI.isin(curr_in_sample)].reset_index(drop=True)
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE=')]
curr_rs_cm = confusion_matrix(curr_rs_preds.TrueLabel, curr_rs_preds.PredLabel,normalize='true')
curr_rs_cm = pd.DataFrame(curr_rs_cm)
curr_rs_cm.columns = ['GOSE: 1','GOSE: 2/3','GOSE: 4','GOSE: 5','GOSE: 6','GOSE: 7','GOSE: 8']
curr_rs_cm = curr_rs_cm.assign(TrueLabel=['GOSE: 1','GOSE: 2/3','GOSE: 4','GOSE: 5','GOSE: 6','GOSE: 7','GOSE: 8'])
curr_rs_cm = curr_rs_cm.melt(id_vars=['TrueLabel'],var_name='PredLabel',value_name='cm_prob')
curr_rs_cm['RESAMPLE_IDX'] = curr_resamples.RESAMPLE_IDX[curr_rs_row]
compiled_cm.append(curr_rs_cm)
return pd.concat(compiled_cm,ignore_index = True)
# Function to calculate accuracy
def calc_bs_accuracy(curr_resamples, compiled_test_preds, progress_bar = True, progress_bar_desc = ''):
if progress_bar:
iterator = tqdm(range(curr_resamples.shape[0]),desc=progress_bar_desc)
else:
iterator = range(curr_resamples.shape[0])
compiled_accuracy = []
compiled_rs_idx = []
for curr_rs_row in iterator:
compiled_rs_idx.append(curr_resamples.RESAMPLE_IDX[curr_rs_row])
curr_in_sample = curr_resamples.GUPIs[curr_rs_row]
curr_rs_preds = compiled_test_preds[compiled_test_preds.GUPI.isin(curr_in_sample)].reset_index(drop=True)
prob_cols = [col for col in curr_rs_preds if col.startswith('Pr(GOSE=')]
compiled_accuracy.append(accuracy_score(curr_rs_preds.TrueLabel, curr_rs_preds.PredLabel))
return | pd.DataFrame({'RESAMPLE_IDX':compiled_rs_idx,'Accuracy':compiled_accuracy}) | pandas.DataFrame |
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Age,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Timedelta
)
from woodwork.statistics_utils import (
_get_describe_dict,
_get_mode,
_make_categorical_for_mutual_info,
_replace_nans_for_mutual_info
)
from woodwork.tests.testing_utils import mi_between_cols, to_pandas
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
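# import_or_none returns the module when it is installed and None otherwise, so the
# optional Dask and Koalas dependencies are not hard requirements for these tests.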
def test_get_mode():
series_list = [
pd.Series([1, 2, 3, 4, 2, 2, 3]),
pd.Series(['a', 'b', 'b', 'c', 'b']),
pd.Series([3, 2, 3, 2]),
pd.Series([np.nan, np.nan, np.nan]),
pd.Series([pd.NA, pd.NA, pd.NA]),
pd.Series([1, 2, np.nan, 2, np.nan, 3, 2]),
pd.Series([1, 2, pd.NA, 2, pd.NA, 3, 2])
]
answer_list = [2, 'b', 2, None, None, 2, 2]
for series, answer in zip(series_list, answer_list):
mode = _get_mode(series)
if answer is None:
assert mode is None
else:
assert mode == answer
def test_accessor_replace_nans_for_mutual_info():
df_nans = pd.DataFrame({
'ints': pd.Series([2, pd.NA, 5, 2], dtype='Int64'),
'floats': pd.Series([3.3, None, 2.3, 1.3]),
'bools': pd.Series([True, None, True, False]),
'bools_pdna': pd.Series([True, pd.NA, True, False], dtype='boolean'),
'int_to_cat_nan': pd.Series([1, np.nan, 3, 1], dtype='category'),
'str': pd.Series(['test', np.nan, 'test2', 'test']),
'str_no_nan': pd.Series(['test', 'test2', 'test2', 'test']),
'dates': | pd.Series(['2020-01-01', None, '2020-01-02', '2020-01-03']) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import numpy as np
import pandas as pd
import pytest
import pytz
from eemeter.transform import (
as_freq,
clean_caltrack_billing_data,
downsample_and_clean_caltrack_daily_data,
clean_caltrack_billing_daily_data,
day_counts,
get_baseline_data,
get_reporting_data,
get_terms,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
overwrite_partial_rows_with_nan,
)
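# The il_electricity_cdd_hdd_* fixtures used below are assumed to be defined elsewhere in
# the test suite (e.g. a conftest) and to provide example meter and temperature series.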
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_daily_all_nones_instantaneous(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D", series_type="instantaneous")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_daily_all_nones(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (28,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_hourly = as_freq(temperature_data, freq="H", series_type="instantaneous")
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (811,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_month_start = as_freq(temperature_data, freq="MS", series_type="instantaneous")
assert as_month_start.shape == (29,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq="MS")).mean()
assert temperature_data.shape == (28,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_as_freq_perserves_nulls(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
monthly_with_nulls = meter_data[meter_data.index.year != 2016].reindex(
meter_data.index
)
daily_with_nulls = as_freq(monthly_with_nulls.value, freq="D")
assert (
round(monthly_with_nulls.value.sum(), 2)
== round(daily_with_nulls.sum(), 2)
== 11094.05
)
assert monthly_with_nulls.value.isnull().sum() == 13
assert daily_with_nulls.isnull().sum() == 365
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = pd.Series([], index=index)
counts = day_counts(data.index)
assert counts.shape == (0,)
def test_get_baseline_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(meter_data)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_baseline_data_with_end(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(meter_data, end=blackout_start_date)
assert meter_data.shape != baseline_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date, max_days=None
)
assert meter_data.shape != baseline_data.shape == (9595, 1)
assert len(warnings) == 0
def test_get_baseline_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
with pytest.raises(NoBaselineDataError):
get_baseline_data(meter_data, end=pd.Timestamp("2000").tz_localize("UTC"))
def test_get_baseline_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, start=start, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_start"
assert (
warning.description
== "Data does not have coverage at requested baseline start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_baseline_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, end=end, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_end"
assert (
warning.description
== "Data does not have coverage at requested baseline end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_baseline_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_n_days_billing_period_overshoot(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2017, 11, 9, tzinfo=pytz.UTC),
max_days=45,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 526.25
assert len(warnings) == 0
def test_get_baseline_data_too_far_from_date(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
end_date = datetime(2020, 11, 9, tzinfo=pytz.UTC)
max_days = 45
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 1393.4
assert len(warnings) == 0
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (3, 1)
assert round(baseline_data.value.sum(), 2) == 2043.92
assert len(warnings) == 0
    # Includes 3 data points because data at index -3 is closer to start target
    # than data at index -2
start_target = baseline_data.index[-1] - timedelta(days=max_days)
assert abs((baseline_data.index[0] - start_target).days) < abs(
(baseline_data.index[1] - start_target).days
)
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
def test_get_reporting_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(meter_data)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_reporting_data_with_start(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(meter_data, start=blackout_end_date)
assert meter_data.shape != reporting_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date, max_days=None
)
assert meter_data.shape != reporting_data.shape == (9607, 1)
assert len(warnings) == 0
def test_get_reporting_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
with pytest.raises(NoReportingDataError):
get_reporting_data(meter_data, start=pd.Timestamp("2030").tz_localize("UTC"))
def test_get_reporting_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
reporting_data, warnings = get_reporting_data(
meter_data, start=start, max_days=None
)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_start"
assert (
warning.description
== "Data does not have coverage at requested reporting start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_reporting_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
reporting_data, warnings = get_reporting_data(meter_data, end=end, max_days=None)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_end"
assert (
warning.description
== "Data does not have coverage at requested reporting end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_reporting_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_terms_unrecognized_method(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index, term_lengths=[365], method="unrecognized")
def test_get_terms_unsorted_index(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index[::-1], term_lengths=[365])
def test_get_terms_bad_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
terms = get_terms(
meter_data.index,
term_lengths=[60, 60, 60],
term_labels=["abc", "def"], # too short
)
def test_get_terms_default_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert [t.label for t in terms] == ["term_001", "term_002", "term_003"]
def test_get_terms_custom_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(
meter_data.index, term_lengths=[60, 60, 60], term_labels=["abc", "def", "ghi"]
)
assert [t.label for t in terms] == ["abc", "def", "ghi"]
def test_get_terms_empty_index_input(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index[:0], term_lengths=[60, 60, 60])
assert len(terms) == 0
def test_get_terms_strict(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
strict_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="strict",
)
assert len(strict_terms) == 2
year1 = strict_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (12,)
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert (
year1.target_end_date
== pd.Timestamp("2017-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert (
year1.actual_start_date
== year1.index[0]
== pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
)
assert (
year1.actual_end_date
== year1.index[-1]
== pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
)
assert year1.actual_term_length_days == 332
assert year1.complete
year2 = strict_terms[1]
assert year2.index.shape == (13,)
assert year2.label == "year2"
assert year2.target_start_date == pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
assert (
year2.target_end_date
== pd.Timestamp("2018-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year2.target_term_length_days == 365
assert (
year2.actual_start_date
== year2.index[0]
== pd.Timestamp("2016-12-19 06:00:00+00:00", tz="UTC")
)
assert (
year2.actual_end_date
== year2.index[-1]
== pd.Timestamp("2017-12-22 06:00:00+0000", tz="UTC")
)
assert year2.actual_term_length_days == 368
assert year2.complete
def test_get_terms_nearest(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
nearest_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="nearest",
)
assert len(nearest_terms) == 2
year1 = nearest_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (13,)
assert year1.index[0] == pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
assert year1.index[-1] == pd.Timestamp("2017-01-21 06:00:00+0000", tz="UTC")
assert (
year1.target_start_date
        == pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC")
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import tempfile
import time
from collections import OrderedDict
from datetime import datetime
from string import printable
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet
except ImportError: # pragma: no cover
fastparquet = None
try:
import sqlalchemy
except ImportError: # pragma: no cover
sqlalchemy = None
from .... import tensor as mt
from .... import dataframe as md
from ....config import option_context
from ....tests.core import require_cudf, require_ray
from ....utils import arrow_array_to_objects, lazy_import, pd_release_version
from ..dataframe import from_pandas as from_pandas_df
from ..series import from_pandas as from_pandas_series
from ..index import from_pandas as from_pandas_index, from_tileable
from ..from_tensor import dataframe_from_tensor, dataframe_from_1d_tileables
from ..from_records import from_records
ray = lazy_import("ray")
_date_range_use_inclusive = pd_release_version[:2] >= (1, 4)
def test_from_pandas_dataframe_execution(setup):
# test empty DataFrame
pdf = pd.DataFrame()
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(columns=list("ab"))
df = from_pandas_df(pdf)
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
pdf = pd.DataFrame(
np.random.rand(20, 30), index=[np.arange(20), np.arange(20, 0, -1)]
)
df = from_pandas_df(pdf, chunk_size=(13, 21))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
def test_from_pandas_series_execution(setup):
# test empty Series
ps = pd.Series(name="a")
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = from_pandas_series(ps)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
ps = pd.Series(
np.random.rand(20), index=[np.arange(20), np.arange(20, 0, -1)], name="a"
)
series = from_pandas_series(ps, chunk_size=13)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
def test_from_pandas_index_execution(setup):
pd_index = pd.timedelta_range("1 days", periods=10)
index = from_pandas_index(pd_index, chunk_size=7)
result = index.execute().fetch()
pd.testing.assert_index_equal(pd_index, result)
def test_index_execution(setup):
rs = np.random.RandomState(0)
pdf = pd.DataFrame(
rs.rand(20, 10),
index=np.arange(20, 0, -1),
columns=["a" + str(i) for i in range(10)],
)
df = from_pandas_df(pdf, chunk_size=13)
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf.index)
result = df.columns.execute().fetch()
pd.testing.assert_index_equal(result, pdf.columns)
# df has unknown chunk shape on axis 0
df = df[df.a1 < 0.5]
# test df.index
result = df.index.execute().fetch()
pd.testing.assert_index_equal(result, pdf[pdf.a1 < 0.5].index)
s = pd.Series(pdf["a1"], index=pd.RangeIndex(20))
series = from_pandas_series(s, chunk_size=13)
# test series.index which has value
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
s = pdf["a2"]
series = from_pandas_series(s, chunk_size=13)
# test series.index
result = series.index.execute().fetch()
pd.testing.assert_index_equal(result, s.index)
# test tensor
raw = rs.random(20)
t = mt.tensor(raw, chunk_size=13)
result = from_tileable(t).execute().fetch()
pd.testing.assert_index_equal(result, pd.Index(raw))
def test_initializer_execution(setup):
arr = np.random.rand(20, 30)
pdf = pd.DataFrame(arr, index=[np.arange(20), np.arange(20, 0, -1)])
df = md.DataFrame(pdf, chunk_size=(15, 10))
result = df.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
df = md.DataFrame(arr, index=md.date_range("2020-1-1", periods=20))
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result, pd.DataFrame(arr, index=pd.date_range("2020-1-1", periods=20))
)
df = md.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=md.date_range("1/1/2010", periods=6, freq="D"),
)
result = df.execute().fetch()
pd.testing.assert_frame_equal(
result,
pd.DataFrame(
{"prices": [100, 101, np.nan, 100, 89, 88]},
index=pd.date_range("1/1/2010", periods=6, freq="D"),
),
)
s = np.random.rand(20)
ps = pd.Series(s, index=[np.arange(20), np.arange(20, 0, -1)], name="a")
series = md.Series(ps, chunk_size=7)
result = series.execute().fetch()
pd.testing.assert_series_equal(ps, result)
series = md.Series(s, index=md.date_range("2020-1-1", periods=20))
result = series.execute().fetch()
pd.testing.assert_series_equal(
result, pd.Series(s, index=pd.date_range("2020-1-1", periods=20))
)
pi = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
index = md.Index(md.Index(pi))
result = index.execute().fetch()
pd.testing.assert_index_equal(pi, result)
def test_index_only(setup):
df = md.DataFrame(index=[1, 2, 3])
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=[1, 2, 3])
pd.testing.assert_series_equal(s.execute().fetch(), pd.Series(index=[1, 2, 3]))
df = md.DataFrame(index=md.Index([1, 2, 3]))
pd.testing.assert_frame_equal(df.execute().fetch(), pd.DataFrame(index=[1, 2, 3]))
s = md.Series(index=md.Index([1, 2, 3]), dtype=object)
pd.testing.assert_series_equal(
s.execute().fetch(), pd.Series(index=[1, 2, 3], dtype=object)
)
def test_series_from_tensor(setup):
data = np.random.rand(10)
series = md.Series(mt.tensor(data), name="a")
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data, name="a"))
series = md.Series(mt.tensor(data, chunk_size=3))
pd.testing.assert_series_equal(series.execute().fetch(), pd.Series(data))
series = md.Series(mt.ones((10,), chunk_size=4))
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(np.ones(10)),
)
index_data = np.random.rand(10)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=mt.tensor(index_data, chunk_size=4),
)
pd.testing.assert_series_equal(
series.execute().fetch(), pd.Series(data, name="a", index=index_data)
)
series = md.Series(
mt.tensor(data, chunk_size=3),
name="a",
index=md.date_range("2020-1-1", periods=10),
)
pd.testing.assert_series_equal(
series.execute().fetch(),
pd.Series(data, name="a", index=pd.date_range("2020-1-1", periods=10)),
)
def test_from_tensor_execution(setup):
tensor = mt.random.rand(10, 10, chunk_size=5)
df = dataframe_from_tensor(tensor)
tensor_res = tensor.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
df_result = df.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.RangeIndex(0, 10))
pd.testing.assert_index_equal(df_result.columns, pd.RangeIndex(0, 10))
pd.testing.assert_frame_equal(df_result, pdf_expected)
# test from tensor with unknown shape
tensor2 = tensor[tensor[:, 0] < 0.9]
df = dataframe_from_tensor(tensor2)
df_result = df.execute().fetch()
tensor_res = tensor2.execute().fetch()
pdf_expected = pd.DataFrame(tensor_res)
pd.testing.assert_frame_equal(df_result.reset_index(drop=True), pdf_expected)
# test converted with specified index_value and columns
tensor2 = mt.random.rand(2, 2, chunk_size=1)
df2 = dataframe_from_tensor(
tensor2, index=pd.Index(["a", "b"]), columns=pd.Index([3, 4])
)
df_result = df2.execute().fetch()
pd.testing.assert_index_equal(df_result.index, pd.Index(["a", "b"]))
pd.testing.assert_index_equal(df_result.columns, pd.Index([3, 4]))
# test converted from 1-d tensor
tensor3 = mt.array([1, 2, 3])
df3 = dataframe_from_tensor(tensor3)
result3 = df3.execute().fetch()
pdf_expected = pd.DataFrame(np.array([1, 2, 3]))
pd.testing.assert_frame_equal(pdf_expected, result3)
# test converted from identical chunks
tensor4 = mt.ones((10, 10), chunk_size=3)
df4 = dataframe_from_tensor(tensor4)
result4 = df4.execute().fetch()
pdf_expected = pd.DataFrame(tensor4.execute().fetch())
pd.testing.assert_frame_equal(pdf_expected, result4)
# from tensor with given index
tensor5 = mt.ones((10, 10), chunk_size=3)
df5 = dataframe_from_tensor(tensor5, index=np.arange(0, 20, 2))
result5 = df5.execute().fetch()
pdf_expected = pd.DataFrame(tensor5.execute().fetch(), index=np.arange(0, 20, 2))
pd.testing.assert_frame_equal(pdf_expected, result5)
# from tensor with given index that is a tensor
raw7 = np.random.rand(10, 10)
tensor7 = mt.tensor(raw7, chunk_size=3)
index_raw7 = np.random.rand(10)
index7 = mt.tensor(index_raw7, chunk_size=4)
df7 = dataframe_from_tensor(tensor7, index=index7)
result7 = df7.execute().fetch()
pdf_expected = pd.DataFrame(raw7, index=index_raw7)
pd.testing.assert_frame_equal(pdf_expected, result7)
# from tensor with given index is a md.Index
raw10 = np.random.rand(10, 10)
tensor10 = mt.tensor(raw10, chunk_size=3)
index10 = md.date_range("2020-1-1", periods=10, chunk_size=3)
df10 = dataframe_from_tensor(tensor10, index=index10)
result10 = df10.execute().fetch()
pdf_expected = pd.DataFrame(raw10, index=pd.date_range("2020-1-1", periods=10))
pd.testing.assert_frame_equal(pdf_expected, result10)
# from tensor with given columns
tensor6 = mt.ones((10, 10), chunk_size=3)
df6 = dataframe_from_tensor(tensor6, columns=list("abcdefghij"))
result6 = df6.execute().fetch()
pdf_expected = pd.DataFrame(tensor6.execute().fetch(), columns=list("abcdefghij"))
pd.testing.assert_frame_equal(pdf_expected, result6)
# from 1d tensors
raws8 = [
("a", np.random.rand(8)),
("b", np.random.randint(10, size=8)),
("c", ["".join(np.random.choice(list(printable), size=6)) for _ in range(8)]),
]
tensors8 = OrderedDict((r[0], mt.tensor(r[1], chunk_size=3)) for r in raws8)
raws8.append(("d", 1))
raws8.append(("e", pd.date_range("2020-1-1", periods=8)))
tensors8["d"] = 1
tensors8["e"] = raws8[-1][1]
df8 = dataframe_from_1d_tileables(tensors8, columns=[r[0] for r in raws8])
result = df8.execute().fetch()
pdf_expected = pd.DataFrame(OrderedDict(raws8))
pd.testing.assert_frame_equal(result, pdf_expected)
# from 1d tensors and specify index with a tensor
index_raw9 = np.random.rand(8)
index9 = mt.tensor(index_raw9, chunk_size=4)
df9 = dataframe_from_1d_tileables(
tensors8, columns=[r[0] for r in raws8], index=index9
)
result = df9.execute().fetch()
pdf_expected = pd.DataFrame(OrderedDict(raws8), index=index_raw9)
pd.testing.assert_frame_equal(result, pdf_expected)
# from 1d tensors and specify index
df11 = dataframe_from_1d_tileables(
tensors8,
columns=[r[0] for r in raws8],
index=md.date_range("2020-1-1", periods=8),
)
result = df11.execute().fetch()
pdf_expected = pd.DataFrame(
OrderedDict(raws8), index=pd.date_range("2020-1-1", periods=8)
)
pd.testing.assert_frame_equal(result, pdf_expected)
def test_from_records_execution(setup):
dtype = np.dtype([("x", "int"), ("y", "double"), ("z", "<U16")])
ndarr = np.ones((10,), dtype=dtype)
pdf_expected = pd.DataFrame.from_records(ndarr, index=pd.RangeIndex(10))
# from structured array of mars
tensor = mt.ones((10,), dtype=dtype, chunk_size=3)
df1 = from_records(tensor)
df1_result = df1.execute().fetch()
pd.testing.assert_frame_equal(df1_result, pdf_expected)
# from structured array of numpy
df2 = from_records(ndarr)
df2_result = df2.execute().fetch()
pd.testing.assert_frame_equal(df2_result, pdf_expected)
def test_read_csv_execution(setup):
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),
columns=["a", "b", "c"],
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
r = md.read_csv(file_path, index_col=0)
mdf = r.execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
# size_res = self.executor.execute_dataframe(r, mock=True)
# assert sum(s[0] for s in size_res) == os.stat(file_path).st_size
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=10).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
mdf = md.read_csv(file_path, index_col=0, nrows=1).execute().fetch()
pd.testing.assert_frame_equal(df[:1], mdf)
# test names and usecols
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int64),
columns=["a", "b", "c"],
)
df.to_csv(file_path, index=False)
mdf = md.read_csv(file_path, usecols=["c", "b"]).execute().fetch()
pd.testing.assert_frame_equal(pd.read_csv(file_path, usecols=["c", "b"]), mdf)
mdf = (
md.read_csv(file_path, names=["a", "b", "c"], usecols=["c", "b"])
.execute()
.fetch()
)
pd.testing.assert_frame_equal(
pd.read_csv(file_path, names=["a", "b", "c"], usecols=["c", "b"]), mdf
)
mdf = (
md.read_csv(file_path, names=["a", "b", "c"], usecols=["a", "c"])
.execute()
.fetch()
)
pd.testing.assert_frame_equal(
pd.read_csv(file_path, names=["a", "b", "c"], usecols=["a", "c"]), mdf
)
mdf = md.read_csv(file_path, usecols=["a", "c"]).execute().fetch()
pd.testing.assert_frame_equal(pd.read_csv(file_path, usecols=["a", "c"]), mdf)
# test sep
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
)
df.to_csv(file_path, sep=";")
pdf = pd.read_csv(file_path, sep=";", index_col=0)
mdf = md.read_csv(file_path, sep=";", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = (
md.read_csv(file_path, sep=";", index_col=0, chunk_bytes=10)
.execute()
.fetch()
)
pd.testing.assert_frame_equal(pdf, mdf2)
# test missing value
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"c1": [np.nan, "a", "b", "c"],
"c2": [1, 2, 3, np.nan],
"c3": [np.nan, np.nan, 3.4, 2.2],
}
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=12).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
index = pd.date_range(start="1/1/2018", periods=100)
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
},
index=index,
)
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = md.read_csv(file_path, index_col=0, chunk_bytes=100).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf2)
# test nan
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.csv")
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
}
)
df.iloc[20:, :] = pd.NA
df.to_csv(file_path)
pdf = pd.read_csv(file_path, index_col=0)
mdf = md.read_csv(file_path, index_col=0, head_lines=10, chunk_bytes=200)
result = mdf.execute().fetch()
pd.testing.assert_frame_equal(pdf, result)
# dtypes is inferred as expected
pd.testing.assert_series_equal(
mdf.dtypes, pd.Series(["float64", "object", "int64"], index=df.columns)
)
# test compression
with tempfile.TemporaryDirectory() as tempdir:
file_path = os.path.join(tempdir, "test.gzip")
index = pd.date_range(start="1/1/2018", periods=100)
df = pd.DataFrame(
{
"col1": np.random.rand(100),
"col2": np.random.choice(["a", "b", "c"], (100,)),
"col3": np.arange(100),
},
index=index,
)
df.to_csv(file_path, compression="gzip")
pdf = pd.read_csv(file_path, compression="gzip", index_col=0)
mdf = md.read_csv(file_path, compression="gzip", index_col=0).execute().fetch()
pd.testing.assert_frame_equal(pdf, mdf)
mdf2 = (
md.read_csv(file_path, compression="gzip", index_col=0, chunk_bytes="1k")
.execute()
.fetch()
)
pd.testing.assert_frame_equal(pdf, mdf2)
# test multiple files
for merge_small_file_option in [{"n_sample_file": 1}, None]:
with tempfile.TemporaryDirectory() as tempdir:
df = pd.DataFrame(np.random.rand(300, 3), columns=["a", "b", "c"])
file_paths = [os.path.join(tempdir, f"test{i}.csv") for i in range(3)]
df[:100].to_csv(file_paths[0])
df[100:200].to_csv(file_paths[1])
df[200:].to_csv(file_paths[2])
mdf = (
md.read_csv(
file_paths,
index_col=0,
merge_small_file_options=merge_small_file_option,
)
.execute()
.fetch()
)
pd.testing.assert_frame_equal(df, mdf)
mdf2 = (
md.read_csv(file_paths, index_col=0, chunk_bytes=50).execute().fetch()
)
            pd.testing.assert_frame_equal(df, mdf2)
import numpy as np
import pandas as pd
from scipy import signal
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.pyplot import cm
from scipy.interpolate import interp1d
from .core import spk_time_to_scv, firing_pos_from_scv, smooth
from ..base import SPKTAG
from ..utils import colorbar
from ..utils.plotting import colorline
def info_bits(Fr, P):
Fr[Fr==0] = 1e-25
MFr = sum(P.ravel()*Fr.ravel())
return sum(P.ravel()*(Fr.ravel()/MFr)*np.log2(Fr.ravel()/MFr))
def info_sparcity(Fr, P):
Fr[Fr==0] = 1e-25
MFr = sum(P.ravel()*Fr.ravel())
return sum(P.ravel()*Fr.ravel()**2/MFr**2)
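# Usage sketch (hypothetical arrays, not part of the original module): both helpers
# expect a firing-rate map `Fr` (Hz per spatial bin) and an occupancy-probability
# map `P` of the same shape that sums to 1, e.g.
#     Fr = np.abs(np.random.randn(40, 40)) * 5.0      # assumed 40x40 binned maze
#     P = np.ones((40, 40)) / (40 * 40)               # uniform occupancy
#     bits_per_spike = info_bits(Fr, P)               # Skaggs-style spatial information
#     sparsity_metric = info_sparcity(Fr, P)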
class place_field(object):
'''
place cells class contains `ts` `pos` `scv` for analysis
load_log for behavior
load_spktag for spike data
    get_fields for computing the representations using spike and behavior data
'''
def __init__(self, pos, v_cutoff=5, bin_size=2.5, ts=None, t_step=None):
'''
        initialize the place-field object with positions `pos` and timestamps `ts`;
        if `ts` is None, it is generated from `t_step`
'''
if ts is None:
ts = np.arange(0, pos.shape[0]*t_step, t_step)
self.t_step = t_step
self.ts, self.pos = ts, pos
self._ts_restore, self._pos_restore = ts, pos
self.spk_time_array, self.spk_time_dict = None, None
self.df = {}
# key parameters for initialization (before self.initialize we need to align behavior with ephys)
self.bin_size = bin_size
self.v_cutoff = v_cutoff
self.initialize(bin_size=self.bin_size, v_cutoff=self.v_cutoff)
def __call__(self, t_step):
'''
        resample the trajectory with a new time interval;
        reinitialize with a new t_step (dt)
'''
fs = self.fs
new_fs = 1/t_step
self.t_step = t_step
self.ts, self.pos = self.interp_pos(self.ts, self.pos, self.t_step)
self.get_speed()
def restore(self):
self.ts, self.pos = self._ts_restore, self._pos_restore
@property
def fs(self):
self._fs = 1/(self.ts[1]-self.ts[0])
return self._fs
def interp_pos(self, t, pos, new_dt):
'''
convert irregularly sampled pos into regularly sampled pos
        new_dt is the new sampling interval in seconds
        example:
        >>> new_fs = 200.
        >>> pc.ts, pc.pos = pc.interp_pos(pc.ts, pc.pos, new_dt=1./new_fs)
'''
dt = t[1] - t[0]
x, y = interp1d(t, pos[:,0], fill_value="extrapolate"), interp1d(t, pos[:,1], fill_value="extrapolate")
new_t = np.arange(t[0], t[-1], new_dt)
new_pos = np.hstack((x(new_t).reshape(-1,1), y(new_t).reshape(-1,1)))
return new_t, new_pos
def align_with_recording(self, recording_start_time, recording_end_time, replay_offset=0):
'''
ts before alignment |--------------------|
behavior start: |
behavior end: |
recording start: |------------
recording end: ------------|
replay_offset : |
ts after alignment |------------|
'''
self.ts += replay_offset # 0 if the ephys is not offset by replaying through neural signal generator
self.pos = self.pos[np.logical_and(self.ts>recording_start_time, self.ts<recording_end_time)]
self.ts = self.ts[np.logical_and(self.ts>recording_start_time, self.ts<recording_end_time)]
self.t_start = self.ts[0]
self.t_end = self.ts[-1]
self._ts_restore, self._pos_restore = self.ts, self.pos
def initialize(self, bin_size, v_cutoff, maze_range=None):
self.dt = self.ts[1] - self.ts[0]
self.v_cutoff = v_cutoff
self.get_maze_range(maze_range)
self.get_speed()
self.occupation_map(bin_size)
self.pos_df = pd.DataFrame(np.hstack((self.ts.reshape(-1,1), self.pos)),
columns=['time', 'x', 'y'])
# self.binned_pos = (self.pos-self.maze_original)//self.bin_size
def get_maze_range(self, maze_range=None):
if maze_range is None:
self.maze_range = np.vstack((self.pos.min(axis=0), self.pos.max(axis=0))).T
self._maze_original = self.maze_range[:,0] # the left, down corner location
else:
self.maze_range = np.array(maze_range)
self._maze_original = self.maze_range[:,0] # the left, down corner location
@property
def maze_center(self):
self._maze_center = self.maze_original[0]+self.maze_length[0]/2, self.maze_original[1]+self.maze_length[1]/2
return self._maze_center
@property
def maze_original(self):
return self._maze_original
@property
def maze_length(self):
return np.diff(self.maze_range, axis=1).ravel()
@property
def maze_ratio(self):
return self.maze_length[0]/self.maze_length[1]
@property
def binned_pos(self):
return (self.pos-self.maze_original)//self.bin_size
def binned_pos_2_real_pos(self, binned_pos):
pos = binned_pos*self.bin_size + self.maze_original
return pos
def real_pos_2_binned_pos(self, real_pos, interger_output=True):
if interger_output:
binned_pos = (real_pos - self.maze_original)//self.bin_size
else:
binned_pos = (real_pos - self.maze_original)/self.bin_size
return binned_pos
def get_speed(self):
'''
self.ts, self.pos is required
'''
self.v = np.linalg.norm(np.diff(self.pos, axis=0), axis=1)/np.diff(self.ts)
self.v = np.hstack((self.v[0], self.v))
self.v_smoothed = smooth(self.v.reshape(-1,1), int(np.round(self.fs))).ravel()
self.low_speed_idx = np.where(self.v_smoothed < self.v_cutoff)[0]
self.df['pos'] = pd.DataFrame(data=np.hstack((self.pos, self.v_smoothed.reshape(-1,1))), index=self.ts,
columns=['x','y','v'])
self.df['pos'].index.name = 'ts'
'''
# check speed:
f, ax = plt.subplots(1,1, figsize=(18,8))
offset=20000
plot(ts[offset:1000+offset], v[offset:1000+offset])
plot(ts[offset:1000+offset], v_smoothed[offset:1000+offset])
ax.axhline(5, c='m', ls='-.')
'''
# return v_smoothed, v
def plot_speed(self, start=None, stop=None, v_cutoff=5):
if start is None:
start = self.ts[0]
if stop is None:
stop = self.ts[-1]
fig, ax = plt.subplots(1,1, figsize=(18,5))
period = np.logical_and(self.ts>start, self.ts<stop)
plt.plot(self.ts[period], self.v[period], alpha=.7)
plt.plot(self.ts[period], self.v_smoothed[period], lw=3)
ax.axhline(v_cutoff, c='m', ls='-.')
sns.despine()
return fig
def occupation_map(self, bin_size=4, time_cutoff=None):
'''
f, ax = plt.subplots(1,2,figsize=(20,9))
ax[0].plot(self.pos[:,0], self.pos[:,1])
ax[0].plot(self.pos[0,0], self.pos[0,1], 'ro')
ax[0].plot(self.pos[-1,0], self.pos[-1,1], 'ko')
ax[0].pcolormesh(self.X, self.Y, self.O, cmap=cm.hot_r)
sns.heatmap(self.O[::-1]*self.dt, annot=False, cbar=False, ax=ax[1])
'''
# if maze_range != 'auto':
# self.maze_range = maze_range
self.maze_size = np.array([self.maze_range[0][1]-self.maze_range[0][0], self.maze_range[1][1]-self.maze_range[1][0]])
self.bin_size = bin_size
self.nbins = self.maze_size/bin_size
self.nbins = self.nbins.astype(int)
# occupation, self.x_edges, self.y_edges = np.histogram2d(x=self.pos[1:,0], y=self.pos[1:,1],
# bins=self.nbins, range=self.maze_range)
idx = np.where(self.v_smoothed >= self.v_cutoff)[0]
if time_cutoff is not None:
idx = np.delete(idx, np.where(self.ts[idx]>time_cutoff)[0])
occupation, self.x_edges, self.y_edges = np.histogram2d(x=self.pos[idx,0], y=self.pos[idx,1],
bins=self.nbins, range=self.maze_range)
self.X, self.Y = np.meshgrid(self.x_edges, self.y_edges)
self.O = occupation.T.astype(int) # Let each row list bins with common y range.
self.P = self.O/float(self.O.sum()) # occupation prabability
#### parameter used to calculate the fields
self.kernlen = 18
self.kernstd = 2.5
def plot_occupation_map(self, cmap=cm.viridis):
f, ax = plt.subplots(1,2,figsize=(20,9))
ax[0].plot(self.pos[:,0], self.pos[:,1])
ax[0].plot(self.pos[0,0], self.pos[0,1], 'ro')
ax[0].plot(self.pos[-1,0], self.pos[-1,1], 'go')
ax[0].pcolormesh(self.X, self.Y, self.O, cmap=cmap)
ax[1].pcolormesh(self.X, self.Y, self.O, cmap=cmap)
plt.show()
@property
def map_binned_size(self):
return np.array(np.diff(self.maze_range)/self.bin_size, dtype=np.int).ravel()[::-1]
@staticmethod
def gkern(kernlen=21, std=2):
"""Returns a 2D Gaussian kernel array."""
gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1)
gkern2d = np.outer(gkern1d, gkern1d)
gkern2d /= gkern2d.sum()
return gkern2d
def _get_field(self, spk_times):
spk_ts = np.searchsorted(self.ts, spk_times) - 1
idx = np.setdiff1d(spk_ts, self.low_speed_idx)
self.firing_ts = self.ts[spk_ts] #[:,1]
self.firing_pos = self.pos[idx]
self.firing_map, x_edges, y_edges = np.histogram2d(x=self.firing_pos[:,0], y=self.firing_pos[:,1],
bins=self.nbins, range=self.maze_range)
self.firing_map = self.firing_map.T
np.seterr(divide='ignore', invalid='ignore')
self.FR = self.firing_map/self.O/self.dt
# self.FR = np.nan_to_num(self.FR)
self.FR[np.isnan(self.FR)] = 0
self.FR[np.isinf(self.FR)] = 0
self.FR_smoothed = signal.convolve2d(self.FR, self.gkern(self.kernlen, self.kernstd), boundary='symm', mode='same')
return self.FR_smoothed
def firing_map_from_scv(self, scv, t_step, section=[0,1]):
'''
firing heat map constructed from spike count vector (scv) and position
'''
# assert(scv.shape[1]==self.pos.shape[0])
scv = scv.T.copy()
n_neurons, total_bin = scv.shape
valid_bin = np.array(np.array(section)*total_bin, dtype=np.int)
firing_map_smoothed = np.zeros((n_neurons, *self.map_binned_size))
for neuron_id in range(n_neurons):
firing_pos = firing_pos_from_scv(scv, self.pos, neuron_id, valid_bin)
firing_map, x_edges, y_edges = np.histogram2d(x=firing_pos[:,0], y=firing_pos[:,1],
bins=self.nbins, range=self.maze_range)
firing_map = firing_map.T/self.O/t_step
firing_map[np.isnan(firing_map)] = 0
firing_map[np.isinf(firing_map)] = 0
firing_map_smoothed[neuron_id] = signal.convolve2d(firing_map, self.gkern(self.kernlen, self.kernstd), boundary='symm', mode='same')
firing_map_smoothed[firing_map_smoothed==0] = 1e-25
self.fields = firing_map_smoothed
self.n_fields = self.fields.shape[0]
self.n_units = self.n_fields
def get_field(self, spk_time_dict, neuron_id, start=None, end=None):
'''
f, ax = plt.subplots(1,2,figsize=(20,9))
ax[0].plot(self.pos[:,0], self.pos[:,1])
ax[0].plot(self.firing_pos[:,0], self.firing_pos[:,1], 'mo', alpha=0.5)
# ax[0].pcolormesh(self.X, self.Y, self.FR, cmap=cm.hot)
pc = ax[1].pcolormesh(X, Y, FR_GAU, cmap=cm.hot)
colorbar(pc, ax=ax[1], label='Hz')
'''
spk_times = spk_time_dict[neuron_id]
### for cross-validation and field stability check
### calculate representation from `start` to `end`
if start is not None and end is not None:
spk_times = spk_times[np.logical_and(start<=spk_times, spk_times<end)]
self._get_field(spk_times)
def _plot_field(self, trajectory=False, cmap='viridis', marker=True, alpha=0.5, markersize=5, markercolor='m'):
f, ax = plt.subplots(1,1,figsize=(13,10));
pcm = ax.pcolormesh(self.X, self.Y, self.FR_smoothed, cmap=cmap);
plt.colorbar(pcm, ax=ax, label='Hz');
if trajectory:
ax.plot(self.pos[:,0], self.pos[:,1], alpha=0.8);
ax.plot(self.pos[0,0], self.pos[0,1], 'ro');
ax.plot(self.pos[-1,0],self.pos[-1,1], 'ko');
if marker:
ax.plot(self.firing_pos[:,0], self.firing_pos[:,1], 'o',
c=markercolor, alpha=alpha, markersize=markersize);
return f,ax
def get_fields(self, spk_time_dict=None, start=None, end=None, v_cutoff=None, rank=True):
'''
        spk_time_dict is a dictionary keyed from 0 (each spike train is a numpy array):
{0: spike trains for neuron 0
1: spike trains for neuron 1
2: spike trains for neuron 2
...
N: spike trains for neuron N}
'''
if spk_time_dict is None:
spk_time_dict = self.spk_time_dict
self.n_fields = len(spk_time_dict.keys())
self.n_units = self.n_fields
self.fields = np.zeros((self.n_fields, self.O.shape[0], self.O.shape[1]))
self.firing_pos_dict = {}
if v_cutoff is None:
self.get_speed()
else:
self.v_cutoff = v_cutoff
self.get_speed()
for i in spk_time_dict.keys():
### get place fields from neuron i
self.get_field(spk_time_dict, i, start, end)
self.fields[i] = self.FR_smoothed
self.firing_pos_dict[i] = self.firing_pos
self.fields[i] = self.FR_smoothed
### metrics for place fields
self.fields[self.fields==0] = 1e-25
if rank is True:
self.rank_fields(metric_name='spatial_bit_smoothed_spike')
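    # Usage sketch (hypothetical spike times in seconds, not part of the original module):
    #     spk_time_dict = {0: np.array([0.5, 1.2, 3.3]),
    #                      1: np.array([0.7, 2.9])}
    #     pc.get_fields(spk_time_dict, rank=True)   # fills pc.fields and pc.metric
    #     pc.plot_fields(order=True)                # plot fields ranked by the metric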
def plot_fields(self, idx=None, nspks=None, N=10, size=3, cmap='hot', marker=False, markersize=1, alpha=0.8, order=False):
'''
        order: if True, plot fields ranked according to the metric
'''
if idx is None: # plot all fields
            nrow = self.n_fields//N + 1
ncol = N
fig = plt.figure(figsize=(ncol*size, nrow*size));
# plt.tight_layout();
plt.subplots_adjust(wspace=None, hspace=None);
for i in range(self.n_fields):
ax = fig.add_subplot(nrow, ncol, i+1);
if order:
field_id = self.sorted_fields_id[i]
else:
field_id = i
pcm = ax.pcolormesh(self.X, self.Y, self.fields[field_id], cmap=cmap);
ax.set_title('#{0}: {1:.2f}Hz'.format(field_id, self.fields[field_id].max()), fontsize=20)
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect(self.maze_ratio)
if marker:
ax.plot(self.firing_pos_dict[field_id][:,0], self.firing_pos_dict[field_id][:,1],
'mo', markersize=markersize, alpha=alpha)
plt.grid(False)
plt.show();
else:
            nrow = len(idx)//N + 1
ncol = N
fig = plt.figure(figsize=(ncol*size, nrow*size));
plt.subplots_adjust(wspace=None, hspace=None);
for i, field_id in enumerate(idx):
ax = fig.add_subplot(nrow, ncol, i+1);
pcm = ax.pcolormesh(self.X, self.Y, self.fields[field_id], cmap=cmap);
ax.set_title('#{0}: {1:.2f}Hz'.format(field_id, self.fields[field_id].max()))
ax.set_xticks([])
ax.set_yticks([])
if nspks is not None:
ax.set_xlabel('{} spikes'.format(nspks[i]))
if marker:
ax.plot(self.firing_pos_dict[field_id][:,0], self.firing_pos_dict[field_id][:,1],
'mo', markersize=markersize, alpha=alpha)
ax.set_aspect(self.maze_ratio)
plt.grid(False)
plt.show();
return fig
def plot_field(self, i=0, cmap=None, alpha=.3, markersize=10, markercolor='#66f456', trajectory=True):
'''
        plot the i-th place field in detail; only call after `pc.get_fields(pc.spk_time_dict, rank=True)`
example:
@interact(i=(0, pc.n_units-1, 1))
def view_fields(i=0):
pc.plot_field(i)
'''
if cmap is None:
cmap = sns.cubehelix_palette(as_cmap=True, dark=0.05, light=1.2, reverse=True);
neuron_id = self.sorted_fields_id[i]
self._get_field(self.spk_time_dict[neuron_id])
f,ax = self._plot_field(cmap=cmap, alpha=alpha, markersize=markersize,
markercolor=markercolor, trajectory=trajectory);
n_bits = self.metric['spatial_bit_spike'][neuron_id]
p_rate = self.metric['peak_rate'][neuron_id]
ax.set_title('neuron {0}: max firing rate {1:.2f}Hz, {2:.3f} bits'.format(neuron_id, p_rate, n_bits))
return f,ax
def rank_fields(self, metric_name):
'''
metric_name: spatial_bit_spike, spatial_bit_smoothed_spike, spatial_sparcity
'''
self.metric = {}
self.metric['peak_rate'] = np.zeros((self.n_fields,))
self.metric['spatial_bit_spike'] = np.zeros((self.n_fields,))
self.metric['spatial_bit_smoothed_spike'] = np.zeros((self.n_fields,))
self.metric['spatial_sparcity'] = np.zeros((self.n_fields,))
for neuron_id in range(self.fields.shape[0]):
self.metric['peak_rate'][neuron_id] = self.fields[neuron_id].max()
self.metric['spatial_bit_spike'][neuron_id] = info_bits(self.fields[neuron_id], self.P)
self.metric['spatial_bit_smoothed_spike'][neuron_id] = info_bits(self.fields[neuron_id], self.P)
self.metric['spatial_sparcity'][neuron_id] = info_sparcity(self.fields[neuron_id], self.P)
self.sorted_fields_id = np.argsort(self.metric[metric_name])[::-1]
def raster(self, ls, colorful=False, xlim=None, ylim=None):
color_list = ['C{}'.format(i) for i in range(self.n_units)]
fig, ax = plt.subplots(1,1, figsize=(15,10));
if colorful:
ax.eventplot(positions=self.spk_time_array, colors=color_list, ls=ls, alpha=.2);
else:
ax.eventplot(positions=self.spk_time_array, colors='k', ls=ls, alpha=.2);
if xlim is not None:
ax.set_xlim(xlim);
if ylim is not None:
ax.set_ylim(ylim);
ax.set_ylabel('unit')
ax.set_xlabel('time (secs)')
sns.despine()
return fig
def load_spkdf(self, df_file, fs=25000., replay_offset=0, show=False):
'''
core function: load spike dataframe in spktag folder (to get Spikes)
        This function also aligns ephys with behavior and computes the place fields of each unit found in the `df_file`
Example:
------------
pc = place_field(pos=pos, ts=ts)
pc.load_spkdf(spktag_file_df)
pc.report()
'''
print('--------------- place cell object: load spktag dataframe ---------------\r\n')
try:
self.spike_df = pd.read_pickle(df_file)
self.spike_df['frame_id'] /= fs
self.spike_df.set_index('spike_id', inplace=True)
self.spike_df.index = self.spike_df.index.astype(int)
self.spike_df.index -= self.spike_df.index.min()
self.df['spk'] = self.spike_df
self.spk_time_dict = {i: self.spike_df.loc[i]['frame_id'].to_numpy()
for i in self.spike_df.index.unique().sort_values()}
self.df['spk'].reset_index(inplace=True)
self.n_units = np.sort(self.spike_df.spike_id.unique()).shape[0]
self.n_groups = np.sort(self.spike_df.group_id.unique()).shape[0]
print('1. Load the spktag dataframe\r\n {} units are found in {} electrode-groups\r\n'.format(self.n_units, self.n_groups))
except:
print('! Fail to load spike dataframe')
start, end = self.spike_df.frame_id.iloc[0], self.spike_df.frame_id.iloc[-1]
self.align_with_recording(start, end, replay_offset)
# after align_with_recording we have the correct self.ts and self.pos
self.total_spike = len(self.spike_df)
self.total_time = self.ts[-1] - self.ts[0]
self.mean_mua_firing_rate = self.total_spike/self.total_time
self.dt = np.diff(self.ts)[0]*1e3
print('2. Align the behavior and ephys data with {0} offset\r\n starting at {1:.3f} secs, end at {2:.3f} secs, step at {3:.3f} ms\r\n all units mount up to {4:.3f} spikes/sec\r\n'.format(replay_offset, start, end, self.dt, self.mean_mua_firing_rate))
print('3. Calculate the place field during [{},{}] secs\r\n spatially bin the maze, calculate speed and occupation_map with {}cm bin_size\r\n dump spikes when speed is lower than {}cm/secs\r\n'.format(start, end, self.bin_size, self.v_cutoff))
self.initialize(bin_size=self.bin_size, v_cutoff=self.v_cutoff)
self.get_fields(self.spk_time_dict, rank=True)
try:
self.df['spk']['x'] = np.interp(self.df['spk']['frame_id'], self.ts, self.pos[:,0])
self.df['spk']['y'] = np.interp(self.df['spk']['frame_id'], self.ts, self.pos[:,1])
self.df['spk']['v'] = np.interp(self.df['spk']['frame_id'], self.ts, self.v_smoothed)
print('4. Interpolate the position and speed to each spikes, check `pc.spike_df`\r\n')
except:
print('! Fail to fill the position and speed to the spike dataframe')
if show is True:
self.field_fig = self.plot_fields();
print('------------------------------------------------------------------------')
def report(self):
print('occupation map from {0:.2f} to {1:.2f}, with speed cutoff:{2:.2f}'.format(self.ts[0], self.ts[-1], self.v_cutoff))
self.plot_occupation_map();
self.plot_speed(self.ts[0], self.ts[-1]//10, v_cutoff=self.v_cutoff);
self.plot_fields(N=10, cmap='hot', order=True);
def load_spktag(self, spktag_file, show=False):
'''
1. load spktag
2. extract unit time stamps
3. calculate the place fields
4. rank based on its information bit
5. (optional) plot place fields of each unit
check pc.n_units, pc.n_fields and pc.metric after this
'''
spktag = SPKTAG()
spktag.load(spktag_file)
self.spktag_file = spktag_file
self.spk_time_array, self.spk_time_dict = spktag.spk_time_array, spktag.spk_time_dict
self.get_fields(self.spk_time_dict, rank=True)
if show is True:
self.field_fig = self.plot_fields();
def get_scv(self, t_window):
'''
The offline binner to calculate the spike count vector (scv)
run `pc.load_spktag(spktag_file)` first
t_window is the window to count spikes
t_step (a sliding-window step) is only sketched in the commented-out code below
'''
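# Usage sketch (illustrative window value; run load_spktag/load_spkdf first as noted above):
#   scv = pc.get_scv(t_window=0.25)  # spike counts per unit within each 0.25 s window, evaluated at pc.ts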
# if t_step is None:
self.scv = spk_time_to_scv(self.spk_time_dict, t_window=t_window, ts=self.ts)
self.mua_count = self.scv.sum(axis=1)
# scv = scv[self.sorted_fields_id]
return self.scv
# else:
# new_ts = np.arange(self.t_start, self.t_end, t_step)
# scv = spk_time_to_scv(self.spk_time_dict, delta_t=t_window, ts=new_ts)
# # scv = scv[self.sorted_fields_id]
# x, y = interp1d(self.ts, self.pos[:,0], fill_value="extrapolate"), interp1d(self.ts, self.pos[:,1], fill_value="extrapolate")
# new_pos = np.hstack((x(new_ts).reshape(-1,1), y(new_ts).reshape(-1,1)))
# return scv, new_ts, new_pos
def plot_epoch(self, time_range, figsize=(5,5), marker=['ro', 'wo'], markersize=15, alpha=.5, cmap=None, legend_loc=None):
'''
plot trajectory within time_range: [[a0,b0],[a1,b1]...]
with a color code indicating the speed.
'''
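# e.g. pc.plot_epoch([[0, 60], [120, 180]]) draws two one-minute epochs (illustrative values)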
gs = dict(height_ratios=[20,1])
fig, ax = plt.subplots(2, 1, figsize=figsize, gridspec_kw=gs)
for i, _time_range in enumerate(time_range): # ith epoch
epoch = np.where((self.ts<_time_range[1]) & (self.ts>=_time_range[0]))[0]
if cmap is None:
cmap = mpl.cm.cool
norm = mpl.colors.Normalize(vmin=self.v_smoothed.min(), vmax=self.v_smoothed.max())
ax[0] = colorline(x=self.pos[epoch, 0], y=self.pos[epoch, 1],
z=self.v_smoothed[epoch]/self.v_smoothed.max(), #[0,1]
cmap=cmap, ax=ax[0])
if i ==0:
ax[0].plot(self.pos[epoch[-1], 0], self.pos[epoch[-1], 1], marker[0], markersize=markersize, alpha=alpha, label='end')
ax[0].plot(self.pos[epoch[0], 0], self.pos[epoch[0], 1], marker[1], markersize=markersize, alpha=alpha, label='start')
else:
ax[0].plot(self.pos[epoch[-1], 0], self.pos[epoch[-1], 1], marker[0], markersize=markersize, alpha=alpha)
ax[0].plot(self.pos[epoch[0], 0], self.pos[epoch[0], 1], marker[1], markersize=markersize, alpha=alpha)
ax[0].set_xlim(self.maze_range[0]);
ax[0].set_ylim(self.maze_range[1]);
# ax[0].set_title('trajectory in [{0:.2f},{1:.2f}] secs'.format(_time_range[0], _time_range[1]))
if legend_loc is not None:
ax[0].legend(loc=legend_loc)
cb = mpl.colorbar.ColorbarBase(ax[1], cmap=cmap,
norm=norm,
orientation='horizontal')
cb.set_label('speed (cm/sec)')
return ax
def to_file(self, filename):
df_all_in_one = pd.concat([self.pos_df, self.spike_df], sort=True)
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True))]
for t in date_data:
assert as_json_table_type(t) == 'datetime'
from datetime import date
from typing import Dict, List, Optional, Union
try:
from sklearn.base import TransformerMixin # type: ignore
from sklearn.exceptions import NotFittedError # type: ignore
except ImportError:
TransformerMixin = object
NotFittedError = Exception
import itertools
import uuid
import numpy as np
import pandas as pd
from upgini.dataset import Dataset
from upgini.metadata import (
SYSTEM_FAKE_DATE,
SYSTEM_RECORD_ID,
FileColumnMeaningType,
ModelTaskType,
SearchKey,
)
from upgini.search_task import SearchTask
from upgini.utils.format import Format
class FeaturesEnricher(TransformerMixin): # type: ignore
"""Retrieve external features via Upgini that are most relevant to predict your target.
Parameters
----------
search_keys: dict of str->SearchKey or int->SearchKey
Dictionary with column names or indices mapping to key types.
Each of this columns will be used as a search key to find features.
keep_input: bool, optional (default=False)
If True, copy original input columns to the output dataframe.
accurate_model: bool, optional (default=False)
If True, search takes longer but returned metrics may be more accurate.
api_key: str, optional (default=None)
Token to authorize search requests. You can get it on https://profile.upgini.com/.
If not specified, the value is read from the environment variable UPGINI_API_KEY.
endpoint: str, optional (default=None)
URL of Upgini API where search requests are submitted.
If not specified, the default value is used.
Please don't override it unless you are sure.
search_id: str, optional (default=None)
Identifier of fitted enricher.
If not specified, transform can be called only after a fit or fit_transform call.
"""
TARGET_NAME = "target"
EVAL_SET_INDEX = "eval_set_index"
_search_task: Optional[SearchTask] = None
passed_features: List[str] = []
def __init__(
self,
search_keys: Union[Dict[str, SearchKey], Dict[int, SearchKey]],
keep_input: bool = False,
accurate_model: bool = False,
api_key: Optional[str] = None,
endpoint: Optional[str] = None,
search_id: Optional[str] = None,
):
if len(search_keys) == 0:
if search_id:
raise ValueError("To transform with search_id please set search_keys to the value used for fitting.")
else:
raise ValueError("Key columns should be marked up by search_keys.")
self.search_keys = search_keys
self.keep_input = keep_input
self.accurate_model = accurate_model
self.endpoint = endpoint
self.api_key = api_key
if search_id:
search_task = SearchTask(
search_id,
endpoint=self.endpoint,
api_key=self.api_key,
)
print("Checking existing search...")
self._search_task = search_task.poll_result(quiet=True)
print("Search found. Now you can use transform")
def _inner_fit(
self,
X: pd.DataFrame,
y: Union[pd.Series, np.ndarray, list] = None,
eval_set: Optional[List[tuple]] = None,
extract_features: bool = False,
**fit_params,
) -> pd.DataFrame:
if not isinstance(X, pd.DataFrame):
raise TypeError(f"Only pandas.DataFrame supported for X, but {type(X)} was passed.")
if not isinstance(y, pd.Series) and not isinstance(y, np.ndarray) and not isinstance(y, list):
raise TypeError(f"Only pandas.Series or numpy.ndarray or list supported for y, but {type(y)} was passed.")
if isinstance(y, pd.Series):
y_array = y.values
else:
y_array = y
if X.shape[0] != len(y_array):
raise ValueError("X and y should be the same size")
validated_search_keys = self._prepare_search_keys(X)
search_keys = []
for L in range(1, len(validated_search_keys.keys()) + 1):
for subset in itertools.combinations(validated_search_keys.keys(), L):
search_keys.append(subset)
meaning_types = {
**validated_search_keys.copy(),
**{str(c): FileColumnMeaningType.FEATURE for c in X.columns if c not in validated_search_keys.keys()},
}
df = X.copy()
df[self.TARGET_NAME] = y_array
df.reset_index(drop=True, inplace=True)
meaning_types[self.TARGET_NAME] = FileColumnMeaningType.TARGET
df[SYSTEM_RECORD_ID] = df.apply(lambda row: hash(tuple(row)), axis=1)
meaning_types[SYSTEM_RECORD_ID] = FileColumnMeaningType.SYSTEM_RECORD_ID
df_without_eval_set = df.copy()
if eval_set is not None and len(eval_set) > 0:
df[self.EVAL_SET_INDEX] = 0
meaning_types[self.EVAL_SET_INDEX] = FileColumnMeaningType.EVAL_SET_INDEX
for idx, eval_pair in enumerate(eval_set):
if len(eval_pair) != 2:
raise TypeError(
f"Invalid size of eval_set pair: {len(eval_pair)}. "
"It should contain tuples of 2 elements: X and y."
)
eval_X = eval_pair[0]
eval_y = eval_pair[1]
if not isinstance(eval_X, pd.DataFrame):
raise TypeError(
f"Only pandas.DataFrame supported for X in eval_set, but {type(eval_X)} was passed."
)
if (
not isinstance(eval_y, pd.Series)
and not isinstance(eval_y, np.ndarray)
and not isinstance(eval_y, list)
):
raise TypeError(
"pandas.Series or numpy.ndarray or list supported for y in eval_set, "
f"but {type(eval_y)} was passed."
)
eval_df = eval_X.copy()
eval_df[self.TARGET_NAME] = pd.Series(eval_y)
eval_df[SYSTEM_RECORD_ID] = eval_df.apply(lambda row: hash(tuple(row)), axis=1)
eval_df[self.EVAL_SET_INDEX] = idx + 1
df = pd.concat([df, eval_df], ignore_index=True)
if FileColumnMeaningType.DATE not in meaning_types.values():
df[SYSTEM_FAKE_DATE] = date.today()
search_keys.append((SYSTEM_FAKE_DATE,))
meaning_types[SYSTEM_FAKE_DATE] = FileColumnMeaningType.DATE
dataset = Dataset("tds_" + str(uuid.uuid4()), df=df, endpoint=self.endpoint, api_key=self.api_key)
dataset.meaning_types = meaning_types
dataset.search_keys = search_keys
self.passed_features = [
column for column, meaning_type in meaning_types.items() if meaning_type == FileColumnMeaningType.FEATURE
]
self._search_task = dataset.search(extract_features=extract_features, accurate_model=self.accurate_model)
self.__show_metrics()
return df_without_eval_set
def fit(
self,
X: pd.DataFrame,
y: Union[pd.Series, np.ndarray, List],
eval_set: Optional[List[tuple]] = None,
**fit_params,
):
self._inner_fit(X, y, eval_set, False, **fit_params)
def fit_transform(
self,
X: pd.DataFrame,
y: Union[pd.Series, np.ndarray, List],
eval_set: Optional[List[tuple]] = None,
**fit_params,
) -> pd.DataFrame:
df = self._inner_fit(X, y, eval_set, extract_features=True, **fit_params)
etalon_columns = list(X.columns) + [self.TARGET_NAME]
if self._search_task is None:
raise RuntimeError("Fit wasn't completed successfully.")
print("Executing transform step...")
result_features = self._search_task.get_all_initial_raw_features()
if result_features is None:
raise RuntimeError("Search engine crashed on this request.")
if self.keep_input:
result = pd.merge(
df.drop(columns=self.TARGET_NAME),
result_features,
left_on=SYSTEM_RECORD_ID,
right_on=SYSTEM_RECORD_ID,
how="left",
)
else:
result = pd.merge(df, result_features, left_on=SYSTEM_RECORD_ID, right_on=SYSTEM_RECORD_ID, how="left")
result.drop(columns=etalon_columns, inplace=True)
result.index = X.index
if SYSTEM_RECORD_ID in result.columns:
result.drop(columns=SYSTEM_RECORD_ID, inplace=True)
if SYSTEM_FAKE_DATE in result.columns:
result.drop(columns=SYSTEM_FAKE_DATE, inplace=True)
return result
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
if self._search_task is None:
raise NotFittedError("`fit` or `fit_transform` should be called before `transform`.")
if not isinstance(X, pd.DataFrame):
raise TypeError(f"Only pandas.DataFrame supported for X, but {type(X)} was passed.")
validated_search_keys = self._prepare_search_keys(X)
search_keys = []
for L in range(1, len(validated_search_keys.keys()) + 1):
for subset in itertools.combinations(validated_search_keys.keys(), L):
search_keys.append(subset)
meaning_types = validated_search_keys.copy()
feature_columns = [column for column in X.columns if column not in meaning_types.keys()]
df = X.copy()
df = df.reset_index(drop=True)
if FileColumnMeaningType.DATE not in meaning_types.values():
df[SYSTEM_FAKE_DATE] = date.today()
search_keys.append((SYSTEM_FAKE_DATE,))
meaning_types[SYSTEM_FAKE_DATE] = FileColumnMeaningType.DATE
df[SYSTEM_RECORD_ID] = df.apply(lambda row: hash(tuple(row[meaning_types.keys()])), axis=1)
meaning_types[SYSTEM_RECORD_ID] = FileColumnMeaningType.SYSTEM_RECORD_ID
# Don't pass features in backend on transform
if feature_columns:
df_without_features = df.drop(columns=feature_columns)
else:
df_without_features = df
dataset = Dataset(
"sample_" + str(uuid.uuid4()), df=df_without_features, endpoint=self.endpoint, api_key=self.api_key
)
dataset.meaning_types = meaning_types
dataset.search_keys = search_keys
validation_task = self._search_task.validation(dataset, extract_features=True)
etalon_columns = list(self.search_keys.keys())
print("Executing transform step...")
result_features = validation_task.get_all_validation_raw_features()
if result_features is None:
raise RuntimeError("Search engine crashed on this request.")
if not self.keep_input:
result = pd.merge(
df_without_features, result_features, left_on=SYSTEM_RECORD_ID, right_on=SYSTEM_RECORD_ID, how="left"
)
result.drop(columns=etalon_columns, inplace=True)
else:
result = pd.merge(df, result_features, left_on=SYSTEM_RECORD_ID, right_on=SYSTEM_RECORD_ID, how="left")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Merge meteogram files"
import re
import glob
import functools
import itertools
from collections import OrderedDict, defaultdict, namedtuple
import netCDF4
import numpy as np
import pandas as pd
var_signature = namedtuple('var_signature', 'name dtype dimensions')
time_signature = var_signature('time', 'i4', ('time',))
fill_value = {'_FillValue': 9.96920996839e+36}
time_metadata = OrderedDict([
("standard_name", "time"),
("long_name", "time"),
("units", "seconds since"),
("calendar", "proleptic_gregorian"),
("axis", "T"),
])
group_mapping = OrderedDict([
# group_key -> primary_key
('station_', 'station_name'),
('var_', 'var_name'),
('sfcvar_', 'sfcvar_name'),
])
class OrderedDefaultDict(OrderedDict, defaultdict):
"default dict that keeps the order"
def __init__(self, factory, *args, **kwargs):
defaultdict.__init__(self, factory)
OrderedDict.__init__(self, *args, **kwargs)
def memoize(func):
cache = func.cache = {}
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = (args, frozenset(kwargs.items()))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrapper
def index2slice(indices, *more_indices):
def inc(i): return i + 1
def inc_deduct(values, counter=itertools.count()):
c = next(counter)
if isinstance(values, int):
return values - c
return [(val - c) for val in values]
if more_indices:
indices = zip(indices, *more_indices)
slices = []
for (_, g) in itertools.groupby(indices, inc_deduct):
values = list(g)
end = values.pop()
if values:
start = values.pop(0)
if isinstance(start, tuple):
slices.append(
[slice(s, e) for (s, e) in zip(start, map(inc, end))])
else:
slices.append(slice(start, inc(end)))
else:
slices.append(end)
if more_indices:
slices = zip(*slices)
return slices
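# Worked example for index2slice (illustrative, not from the source):
#   index2slice([0, 1, 2, 5, 7, 8]) -> [slice(0, 3), 5, slice(7, 9)]
# Consecutive runs collapse into slices and isolated indices stay as plain ints,
# which keeps the netCDF fancy indexing used later as contiguous as possible.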
def empty(*items):
if not items:
return True
result = []
for item in items:
if isinstance(item, (pd.Series, pd.DataFrame)):
result.append(False if not item.empty else True)
elif isinstance(item, (pd.Index, np.ndarray)):
result.append(False if item.size else True)
else:
result.append(False if item else True)
return all(result)
def merge_dimensions(ncids):
ncids = sorted(ncids, key=lambda nc: len(nc.dimensions), reverse=True)
dims = OrderedDefaultDict(list)
for nc in ncids:
for d in nc.dimensions.values():
dims[d.name].append(None if d.isunlimited() else len(d))
for name, val in dims.items():
dims[name] = max(val)
return OrderedDict(dims)
def name_dtype_dimension(ncids, varlist):
ncids = sorted(ncids, key=lambda nc: len(nc.variables), reverse=True)
varlist = OrderedDict.fromkeys(varlist)
for name in varlist:
for nc in ncids:
if name in nc.variables:
obj = nc.variables[name]
varlist[name] = var_signature(
obj.name, obj.dtype, obj.dimensions)
break
varlist['time'] = time_signature
for name, val in list(varlist.items()):
if val is None:
varlist.pop(name)
return varlist
def _ensure_time_in_variable_list(varlist):
if 'time' not in varlist:
time_index = varlist.index('date') - 1
varlist.insert(time_index, 'time')
return varlist
def merge_variable_names(ncids):
ncids = sorted(ncids, key=lambda nc: len(nc.variables), reverse=True)
names = [name for nc in ncids for name in nc.variables]
names = list(pd.Series(names, names).drop_duplicates())
varlist = []
for name in group_mapping:
varlist.extend(filter(lambda x: x.startswith(name), names))
rest = pd.Index(names).difference(pd.Index(varlist))
varlist.extend(filter(lambda x: x in rest, names))
varlist = _ensure_time_in_variable_list(varlist)
varlist = name_dtype_dimension(ncids, varlist)
return varlist
@memoize
def group_primary_ds(ncid, key):
valid_keys = group_mapping.values()
assert key in valid_keys, "valid keys: {}".format(valid_keys)
if key not in ncid.variables:
return pd.Series([])
values = netCDF4.chartostring(ncid.variables[key][:])
indices, values = zip(*[
(ind, val) for (ind, val) in enumerate(values) if val])
indices = np.array(indices)
values = np.array(values)
return pd.Series(indices, index=values, name=key)
def merged_group_primary_ds(ncids, key):
valid_keys = group_mapping.values()
assert key in valid_keys, "valid keys: {}".format(valid_keys)
series = [group_primary_ds(nc, key) for nc in ncids]
series.sort(key=len, reverse=True)
combined = pd.concat(series).drop_duplicates().index
return pd.Series(np.arange(len(combined)), index=combined, name=key)
def ncattrs(ncids):
ncids = sorted(ncids, key=lambda nc: len(nc.variables), reverse=True)
attrs = OrderedDefaultDict(OrderedDict)
for nc in ncids:
for name in nc.variables:
attrs[name].update(nc[name].__dict__)
attrs['time'].update(time_metadata)
if 'values' in attrs:
attrs['values'].update(fill_value)
if 'sfcvalues' in attrs:
attrs['sfcvalues'].update(fill_value)
return attrs
@memoize
def parse_date(ncid):
"Fix typo in date if applicable."
dates = netCDF4.chartostring(ncid['date'][:])
_dates = pd.to_datetime(dates).to_series()
dif = _dates.diff()
mask = dif < pd.Timedelta(-1, 'ns')
if not mask.any():
# return pd.to_datetime(dates)
return pd.Series(np.arange(len(_dates)), index=_dates, name='date')
tofix = dif[mask].abs()
print("date typos", list(_dates[mask]))
freq = dif.dropna().value_counts().argmax()
correction = _dates[mask] + freq + tofix
print("corrections", list(correction))
_dates[mask] = correction
# return pd.to_datetime(_dates.values)
return pd.Series(np.arange(len(_dates)), index=_dates, name='date')
def dates_as_array_of_strings(dates, ncids):
ncid = ncids
if isinstance(ncids, (list, tuple, set, dict)):
ncid = ncids[0]
ndates, numchars = ncid.variables['date'].shape
stringtoarr = netCDF4.stringtoarr
if getattr(dates, 'index', None) is None:
dates_obj = dates
else:
dates_obj = dates.index
dates_formatted = dates_obj.strftime("%Y%m%dT%H%M%SZ")
dates_str = np.vstack(
[stringtoarr(d, NUMCHARS=numchars) for d in dates_formatted])
return dates_str
def merge_parse_date(ncids, fullday_timesteps=True):
"""
This does not literally merge the dates from files as is.
It looks at the total span of the combined dates and produces
a time series with a uniform frequency. If the datasets have
varying frequencies, it opts for the largest of the files' most common time steps.
"""
dates = [parse_date(nc) for nc in ncids]
freq = max([d.index.to_series().diff().dropna().value_counts().argmax()
for d in dates])
dates_min = min([d.index.min() for d in dates])
dates_max = max([d.index.max() for d in dates])
if fullday_timesteps:
if dates_min.time():
dates_min = pd.Timestamp(dates_min.date())
if dates_max.time():
dates_max = dates_max.date() + pd.Timedelta('1 day')
dates = pd.date_range(dates_min, dates_max, freq=freq)
return pd.Series(np.arange(len(dates)), index=dates, name='date')
def create_time_step(ntimesteps, dtype='i4'):
return np.arange(ntimesteps, dtype=dtype) * 3
def create_time(dates, metadata=time_metadata):
start_date = str(dates[0])
units = metadata['units']
calendar = metadata['calendar']
units = " ".join([units.strip(), start_date])
metadata['units'] = units
tis = netCDF4.date2num(dates.to_pydatetime(), units, calendar)
dtype = time_signature.dtype
return tis.astype(dtype)
def domain_in_filename(files):
domain_re = re.compile(r"(DOM\d+)").search
if isinstance(files, str):
dom = domain_re(files)
if dom:
return dom.group(0)
objs = [domain_re(f) for f in files]
doms = [obj.group(0) for obj in objs if obj]
doms = set(doms)
if len(doms) > 1:
raise ValueError('Multiple domains detected: {}'.format(doms))
return doms.pop()
def get_indices(merged_ds, var_ds, varnames=None):
"""
merge_ds: pd.Series
var_ds: pd.Series
varnames: (pd.Index or list or array or None)
"""
required = merged_ds.index.intersection(var_ds.index)
if varnames is not None:
required = required.intersection(varnames)
varnames = varnames.drop(required, errors='ignore')
merged_index = merged_ds[required].values
var_index = var_ds[required].values
if varnames is not None:
return merged_index, var_index, varnames
return merged_index, var_index
def get_time_indices(merged_ds, var_ds, visited_dates):
"""
merge_ds: pd.Series
var_ds: pd.Series
visited_dates: (pd.Index or list or np.ndarray)
"""
if not isinstance(visited_dates, (pd.Index, list, np.ndarray)):
msg = ("'visited_dates' must be one of these types "
"(pd.Index, list, np.ndarray)")
raise ValueError(msg)
visited_dates = pd.Index(visited_dates)
notseen = var_ds.index.drop(visited_dates, errors='ignore')
visited_dates = visited_dates.append(notseen).sort_values()
return merged_ds[notseen].values, var_ds[notseen].values, visited_dates
def copy_group_data(ncids, outnc, group_key, varlist):
valid_keys = group_mapping.keys()
assert group_key in valid_keys, "valid keys: {}".format(valid_keys)
primary_key = group_mapping[group_key]
merged_ds = merged_group_primary_ds(ncids, key=primary_key)
grp_names = filter(lambda x: x.startswith(group_key), varlist)
for varname in grp_names:
oobj = outnc.variables[varname]
ndims = oobj.ndim
pending = merged_ds.index[:]
for nc in ncids:
if varname not in nc.variables:
continue
var_ds = group_primary_ds(nc, primary_key)
merged_index, var_index, pending = get_indices(
merged_ds, var_ds, pending)
if empty(merged_index, var_index):
continue
data = nc.variables[varname][var_index]
if ndims == 1:
oobj[merged_index] = data
elif ndims == 2:
oobj[merged_index, :] = data
elif ndims == 3:
oobj[merged_index, :, :] = data
else:
raise NotImplementedError("only up to 3 dimensions implemented.")
if empty(pending):
break
return
def merge_datasets(files, reference_file=None, domain=None, outfile=None,
fullday_timesteps=True):
if domain is None:
domain = domain_in_filename(files)
if domain is None:
domain = "DOMxx"
ncids = allncids = [netCDF4.Dataset(f) for f in files]
if reference_file is not None:
refnc = netCDF4.Dataset(reference_file)
allncids = [refnc] + ncids
print("reading dates from files...")
dates_ds = merge_parse_date(ncids, fullday_timesteps=fullday_timesteps)
time_step = create_time_step(len(dates_ds))
time_data = create_time(dates_ds.index)
if outfile is None:
start_ts = dates_ds.index[0].strftime("%Y%m%dT%H%M%SZ")
end_ts = dates_ds.index[-1].strftime("%Y%m%dT%H%M%SZ")
outfile = "1d_vars_{}_{}-{}.nc".format(domain, start_ts, end_ts)
print("outfile is set to: {}".format(outfile))
outnc = netCDF4.Dataset(outfile, "w")
print("Creating dimensions")
dimensions = merge_dimensions(allncids)
for dname, dsize in dimensions.items():
outnc.createDimension(dname, dsize)
varlist = merge_variable_names(allncids)
attrs = ncattrs(allncids)
print("Creating variables")
for vname, vsig in varlist.items():
obj = outnc.createVariable(*vsig)
obj.setncatts(attrs[vname])
station_ds = merged_group_primary_ds(allncids, key='station_name')
profile_ds = merged_group_primary_ds(allncids, key='var_name')
surface_ds = merged_group_primary_ds(allncids, key='sfcvar_name')
print("copying metadata")
for group_key in group_mapping:
copy_group_data(allncids, outnc, group_key, varlist)
ts_obj = outnc.variables['time_step']
ts_obj[:] = time_step[:]
time_obj = outnc.variables['time']
time_obj[:] = time_data[:]
date_obj = outnc.variables['date']
date_str = dates_as_array_of_strings(dates_ds.index, ncids)
date_obj[:, :] = date_str
# populating heights data
print('copying heights data')
heights_obj = outnc.variables['heights']
req_stations = station_ds.index[:]
req_profile = profile_ds.index[:]
for nc in ncids:
if 'heights' not in nc.variables:
continue
if 'var_name' not in nc.variables:
continue
if 'station_name' not in nc.variables:
continue
nc_station_ds = group_primary_ds(nc, 'station_name')
nc_profile_ds = group_primary_ds(nc, 'var_name')
if not empty(req_stations):
station_index, nc_station_index, req_stations = get_indices(
station_ds, nc_station_ds, req_stations)
else:
station_index, nc_station_index = get_indices(
station_ds, nc_station_ds)
if not empty(req_profile):
profile_index, nc_profile_index, req_profile = get_indices(
profile_ds, nc_profile_ds, req_profile)
else:
profile_index, nc_profile_index = get_indices(
profile_ds, nc_profile_ds)
# print('station_index', station_index)
# print('nc_station_index', nc_station_index)
# print('req_stations', req_stations)
# print('profile_index', profile_index)
# print('nc_profile_index', nc_profile_index)
# print('req_profiles', req_profile)
if empty(station_index):
continue
if empty(profile_index):
continue
station_slices, nc_station_slices = index2slice(
station_index, nc_station_index)
profile_slices, nc_profile_slices = index2slice(
profile_index, nc_profile_index)
for ps, nc_ps in zip(profile_slices, nc_profile_slices):
for sta, nc_sta in zip(station_slices, nc_station_slices):
data = nc.variables['heights'][:, nc_ps, nc_sta]
heights_obj[:, ps, sta] = data
# if station_index.size and profile_index.size:
# data = nc.variables['heights'][:]
# for pind, pind_m in zip(nc_profile_index, profile_index):
# for sind, sind_m in zip(nc_station_index, station_index):
# heights_obj[:, pind_m, sind_m] = data[:, pind, sind]
# if req_stations.empty and req_profile.empty:
if empty(req_stations, req_profile):
break
# populating profile data
print("copying profile data")
profile_obj = outnc.variables['values']
seen_dates = pd.Index([])
# -*- coding: utf-8 -*-
import locale
from datetime import date
from os import chdir, path
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from adjustText import adjust_text
from matplotlib.ticker import PercentFormatter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from custom.plots import apply_plot_treatment, get_xticks_labels, palette
from custom.plots_confronti import (compute_vaccini_decessi_eu, fit_model,
get_epidemic_data, get_vaccine_data,
import_epidem_data, import_vaccines_data,
paesi_abitanti_eu, paesi_eu_ita)
from custom.watermarks import add_last_updated, add_watermark
def map_vaccinated(f_vacc):
if f_vacc >= 0 and f_vacc < 20:
return "0%-20%"
elif f_vacc >= 20 and f_vacc < 40:
return "20-40%"
elif f_vacc >= 40 and f_vacc < 60:
return "40-60%"
elif f_vacc >= 60 and f_vacc < 80:
return "60-80%"
elif f_vacc >= 80 and f_vacc <= 100:
return "80-100%"
def group_vaccinated(vacc_res_2021, dec_res_2021):
df_res = pd.DataFrame(vacc_res_2021, columns=["vaccinati"])
df_res["deceduti"] = dec_res_2021
df_res["vacc_mapped"] = df_res["vaccinati"].apply(map_vaccinated)
df_grouped = df_res.groupby("vacc_mapped").mean()["deceduti"]
return df_grouped
# Graphical representation of the results
@mpl.rc_context({"lines.marker": None})
def plot_selection(show=False):
""" Plot dati epidemiologia e vaccini dei paesi selezionati """
# nota: nomi in Inglese
nomi_nazioni = ["Italy", "Romania", "Portugal", "Spain", "Bulgaria"]
label_nazioni = ["Italia", "Romania", "Portogallo", "Spagna", "Bulgaria"]
abitanti_nazioni = [59.55, 19.29, 10.31, 47.35, 6.927]
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
# Unpack all the axes subplots
axes = ax.ravel()
for i in range(len(nomi_nazioni)):
df_epid = get_epidemic_data(nomi_nazioni[i],
df_confirmed,
df_deaths,
df_recovered)
mask_ = df_epid.index >= "2021-06-01"
df_epid = df_epid.loc[mask_, :]
values = 1/(abitanti_nazioni[i])*(df_epid["Total deaths"]-df_epid["Total deaths"][0])
values.plot(ax=axes[0], label=label_nazioni[i])
for i in range(len(nomi_nazioni)):
df_country = get_vaccine_data(df_vacc, nomi_nazioni[i])
mask_ = df_country.index >= "2021-06-01"
df_country = df_country.loc[mask_, :]
df_country["% fully vaccinated"].plot(ax=axes[1],
label=label_nazioni[i],
linewidth=2)
x_ticks, x_labels = get_xticks_labels(df_country.index)
axes[0].set_title("Decessi dal 1° Giugno ad oggi")
axes[0].set_ylabel("Decessi per milione di abitanti")
axes[0].set_xlabel("")
axes[0].set_xticks(x_ticks)
axes[0].set_xticklabels(x_labels)
axes[0].legend()
axes[0].grid()
axes[1].set_ylim(0, 100)
axes[1].set_yticks(np.arange(0, 101, 20))
axes[1].set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
axes[1].set_title("Vaccinati con ciclo completo")
axes[1].set_xlabel("")
axes[1].set_xticks(x_ticks)
axes[1].set_xticklabels(x_labels)
axes[1].legend()
axes[1].grid()
# Add watermarks
add_last_updated(fig, axes[-1], dati="JHU, Our World in Data")
fig.tight_layout()
fig.savefig("../risultati/confronto_nazioni_epidemia-vaccino.png",
dpi=300,
bbox_inches="tight")
if show:
plt.show()
@mpl.rc_context({"lines.marker": None})
def plot_corr_vaccini_decessi(show=False):
""" scatter plot correlazione vaccini e decessi """
# linear fit
x_grid, y_grid, score = fit_model(vacc_res, dec_res)
# compute the (Pearson) correlation coefficient
corr_coeff = round(np.corrcoef(vacc_res, dec_res)[0, 1], 2)
fig, ax = plt.subplots(figsize=(13, 8))
# scatter plot
volume = dec_res.max()*0.050
# generate the list of colors
num_colors = len(paesi_abitanti_eu)
cm = plt.get_cmap("GnBu_r")
ax.set_prop_cycle("color", [cm(i/num_colors) for i in range(num_colors)])
for i in range(num_colors):
ax.scatter(df_["% vaccini"].values[i], df_["decessi"].values[i],
alpha=0.50, edgecolor="black", linewidth=0.75, s=volume)
texts = [ax.text(vacc_res[i], dec_res[i], paesi_eu_ita[i])
for i in range(len(paesi_eu_ita))]
# fix text overlap
adjust_text(texts,
expand_text=(1.20, 1.35),
arrowprops=dict(arrowstyle="-", color="black",
linewidth=.75))
# linear fit plot
ax.plot(x_grid, y_grid, linestyle="--", c=palette[0],
label=f"Fit lineare, R$^2$ score={score}")
# parabolic fit
x_grid_p, y_grid_p, score_p = fit_model(vacc_res, dec_res, degree=2)
ax.plot(x_grid_p, y_grid_p, linestyle="--", c=palette[2],
label=f"Fit parabolico, R$^2$ score={score_p}")
ax.set_ylim(-70, )
ax.set_xlim(0, 100)
title = f"Frazione di vaccinati vs decessi nei 27 Paesi dell'UE dal 22/09/2021\n\
Coefficiente di correlazione = {corr_coeff}"
ax.set_title(title, fontsize=15)
ax.set_xlabel("Frazione media di vaccinati con almeno 1 dose al 22/09/2021", fontsize=15)
ax.set_ylabel("Decessi per milione di abitanti", fontsize=15)
ax.set_xticks(np.arange(0, 101, 20), ["0%", "20%", "40%", "60%", "80%", "100%"])
ax.grid()
ax.legend(fontsize=15)
fig.tight_layout()
# bar plot
df_grouped = group_vaccinated(vacc_res, dec_res)
ax_bar = inset_axes(ax, "30%", "30%",
loc="lower left",
bbox_to_anchor=(0.01, 0.075, 0.98, 0.95),
bbox_transform=ax.transAxes)
ax_bar.set_facecolor((0, 0, 0, 0))
ax_bar.bar(df_grouped.index, df_grouped, width=1,
edgecolor="black", color=palette[1], alpha=0.30)
labels_pad = 50 if df_grouped.max() > 1000 else 10
for index, data in enumerate(df_grouped):
ax_bar.text(x=index, y=data+labels_pad,
ha="center", s=round(data),
fontdict=dict(fontweight="bold"))
ax_bar.xaxis.set_tick_params(rotation=0)
ax_bar.set_title("Decessi medi per milione", pad=15)
ax_bar.set_xlabel("Frazione media vaccinati")
ax_bar.set_yticks([])
ax_bar.spines["bottom"].set_linewidth(1.5)
ax_bar.spines["bottom"].set_color("black")
# Add watermarks
fig.text(0.95, 0.425,
"github.com/apalladi/covid_vaccini_monitoraggio",
fontsize=16,
alpha=0.50,
color=palette[-1],
va="center",
rotation="vertical")
add_last_updated(fig, ax, dati="JHU, Our World in Data", y=-0.05)
fig.savefig("../risultati/vaccini_decessi_EU.png",
dpi=300,
bbox_inches="tight")
if show:
plt.show()
def which_axe(axis, step=10):
axis.set_yticklabels([])
start, end = axis.get_xlim()
axis.xaxis.set_ticks(np.arange(start, end, step))
x_ticks = axis.xaxis.get_major_ticks()
x_ticks[0].label1.set_visible(False)
x_ticks[-1].label1.set_visible(False)
ymin, ymax = axis.get_ylim()
axis.axvline(0.1, ymin=ymin, ymax=ymax, color="black", linewidth=0.5)
axis.grid()
def plot_corr_vaccini_decessi_div(show=False):
""" tornado plot vaccini vs decessi """
# ordina valori in un df
df_ = pd.DataFrame({"% vaccini": vacc_res, "decessi": dec_res})
df_.index = paesi_eu_ita
df_.sort_values(by="% vaccini", inplace=True)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5), sharey=True)
# Unpack all the axes subplots
axes = ax.ravel()
axes[0].barh(df_.index, df_["% vaccini"], facecolor=palette[5])
axes[0].set_xlim(0, 101)
axes[0].xaxis.set_major_formatter(PercentFormatter())
axes[0].set_title("Frazione di vaccinati", color=palette[5], size=10)
axes[0].set_xlabel("Frazione media di vaccinati con almeno 1 dose al 22/09/2021")
which_axe(axes[0])
axes[0].invert_xaxis()
for country in df_.index:
axes[0].text(x=1.0, y=country, color="white",
va="center", ha="right", s=country,
fontdict=dict(fontweight="bold", size=6))
axes[1].barh(df_.index, df_["decessi"], facecolor=palette[4])
axes[1].set_title("Decessi per milione di abitanti", color=palette[4], size=10)
axes[1].set_xlabel("Decessi per milione di abitanti dal 22/09/2021")
which_axe(axes[1], step=250)
title = "Frazione di vaccinati vs decessi nei 27 Paesi dell'UE dal 22/09/2021"
fig.suptitle(title)
# Add watermarks
add_watermark(fig)
add_last_updated(fig, axes[-1], dati="JHU, Our World in Data", y=-0.030)
fig.subplots_adjust(wspace=0, hspace=0)
fig.savefig("../risultati/vaccini_decessi_EU_div.png", dpi=300, bbox_inches="tight")
if show:
plt.show()
if __name__ == "__main__":
# Set work directory for the script
scriptpath = path.dirname(path.realpath(__file__))
chdir(scriptpath)
# Set locale to "it" to parse the month correctly
locale.setlocale(locale.LC_ALL, "it_IT.UTF-8")
# import data
df_confirmed, df_deaths, df_recovered = import_epidem_data()
df_vacc = import_vaccines_data()
# retrieve vaccine vs. death data
# since the start of autumn (22 September 2021)
window = abs((date.today() - date(2021, 9, 22)).days)
# retrieve data for the selected time window
vacc_res, dec_res = compute_vaccini_decessi_eu(df_vacc, df_deaths,
window, fully=False)
# Set the plot style
apply_plot_treatment()
# plot data for the selected countries
plot_selection()
# sort values in a dataframe so they follow the color sequence
df_ = pd.DataFrame({"% vaccini": vacc_res, "decessi": dec_res})
import pandas as pd
# A simple script to convert my excel file to be readable by YNAB.
# YNAB wants: Date,Payee,Category,Memo,Outflow,Inflow
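# Example of a converted row in the final column order (illustrative values):
#   2020-01-15,Grocery Store,Food:Groceries,Weekly shop,54.20,,Checking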
__author__ = "<NAME> <<EMAIL>>"
import_csv = 'xacts.csv'
# read csv
df = pd.read_csv(import_csv, encoding = "ISO-8859-1", thousands=',')
# Build YNAB Category
df['Category'] = df['Master Category'] + ":" + df['Sub Category']
# Move column order
df = df[['Date',
'Payee',
'Category',
'Memo',
'Outflow',
'Inflow',
'Account']]
# Change Types
df['Outflow'] = pd.to_numeric(df['Outflow'])
####################################################
# IMPORTS (FROM LIBRARY) ###########################
####################################################
from pandas import DataFrame
####################################################
# FUNCTION TO GENERATE THE PARTICIPATION DATA ######
####################################################
def generate_data(data):
users = list(data[0])
events = list(data[1])
data = []
for user in users:
for event in events:
if (user['Id'] == event['Id']):
temp = dict(user)
temp['Event'] = event['Event']
data.append(temp)
df = DataFrame(data)
"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
"""pandera schema of the parquet test dataset"""
registration_dttm: pa.typing.Series[pa.typing.DateTime]
id: pa.typing.Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True)
first_name: pa.typing.Series[pa.typing.String]
last_name: pa.typing.Series[pa.typing.String]
email: pa.typing.Series[pa.typing.String]
gender: pa.typing.Series[pa.typing.String] = pa.Field(coerce=True)
ip_address: pa.typing.Series[pa.typing.String]
cc: pa.typing.Series[pa.typing.String]
country: pa.typing.Series[pa.typing.String]
birthdate: pa.typing.Series[pa.typing.String]
salary: pa.typing.Series[pa.typing.Float64] = pa.Field(nullable=True)
title: pa.typing.Series[pa.typing.String]
comments: pa.typing.Series[pa.typing.String] = pa.Field(nullable=True)
@staticmethod
def length():
"""Known length of the data"""
return 5000
@staticmethod
def n_salary_over_150000():
"""Number of rows with salary > 150000"""
return 2384
@pytest.fixture(params=["multifile", "singlefile.parquet", "multifolder"])
def sample_data_path(request):
"""Path of a parquet dataset for testing"""
return Path(__file__).parent / "data" / "parquet" / request.param
def read_sample_dataframe():
"""Read the sample dataframe to pandas and return a cached copy"""
if not hasattr(read_sample_dataframe, "df"):
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)
return read_sample_dataframe.df.copy()
@pytest.fixture(scope="function")
def sample_dataframe():
"""Provide the sample dataframe"""
return read_sample_dataframe()
@pytest.fixture(scope="function")
def sample_dataframe_dict():
"""Provide the sample dataframe"""
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
return backend.read_to_dict(parquet_file.name)
@pytest.mark.parametrize(
"kwargs, exception",
[
({"base_path": "/some/dir", "partitions": -1}, TypeError),
({"base_path": "/some/dir", "partitions": 2.2}, TypeError),
({"base_path": "/some/dir", "partitions": "abc"}, TypeError),
({"base_path": "/some/dir", "partitions": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": 1.1}, TypeError),
({"base_path": "/some/dir", "rows_per_file": -5}, ValueError),
],
)
def test_init_argchecks(kwargs, exception):
"""Challenge the argument validation of the constructor"""
with pytest.raises(exception):
dframeio.ParquetBackend(**kwargs)
def test_read_to_pandas(sample_data_path):
"""Read a sample dataset into a pandas dataframe"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_columns(sample_data_path):
"""Read a sample dataset into a pandas dataframe, selecting some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, columns=["id", "first_name"])
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_rows(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, row_filter="salary > 150000")
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_pandas_sample(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, sample=10)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
@pytest.mark.parametrize("limit", [0, 10])
def test_read_to_pandas_limit(sample_data_path, limit):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, limit=limit)
SampleDataSchema.to_schema().validate(df)
assert len(df) == limit
def test_read_to_pandas_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_pandas("/tmp")
def test_read_to_dict(sample_data_path):
"""Read a sample dataset into a dictionary"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_columns(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"])
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_rows(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, row_filter="salary > 150000")
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_dict_limit(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"], limit=10)
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == 10
def test_read_to_dict_sample(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, sample=10)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
import requests,json,os,re,argparse
import pandas as pd
from time import sleep
parser=argparse.ArgumentParser()
parser.add_argument('-i','--input_file', required=True, help='Input csv file with user name and orcid id')
parser.add_argument('-o','--output_xml', required=True, help='Output xml file')
args=parser.parse_args()
input_file=args.input_file
output_xml=args.output_xml
def get_pmc_data(orcid_id,cursor=''):
'''
A method for fetching pmc data
:param orcid_id: An orcid id
:param cursor: A cursor string, default empty string
'''
try:
data=list()
url_str='https://www.ebi.ac.uk/europepmc/webservices/rest/search?query=AUTHORID:{0}&format=json&sort_date:y%20BDESC&cursorMark={1}'.format(orcid_id,cursor)
response=requests.get(url_str)
if response.ok:
json_data=json.loads(response.content.decode('utf-8'))
data=json_data['resultList']['result']
#print(json_data)
if 'nextCursorMark' in json_data:
if cursor !=json_data['nextCursorMark']:
cursor=json_data['nextCursorMark']
else:
cursor=''
return data,cursor
except:
raise
def add_pmc_link(series):
'''
A method for adding pubmed link to the data table
:param series: A data series with 'pmid' (pubmed id)
'''
try:
pmid=series['pmid']
series['link']='https://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(pmid)
return series
except:
raise
def get_pmc_data_for_user(user,orcid_id):
'''
A method for fetching all publication info for a user
:param user: A user name
:param orcid_id: An orcid id for PMC lookup
:returns: A dataframe containing list of publications
'''
try:
all_data=list()
cursor=''
while True:
data,cursor=get_pmc_data(orcid_id=orcid_id,cursor=cursor)
if len(data)>0 or cursor !='':
all_data.extend(data)
sleep(10)
else:
break
all_data=pd.DataFrame(all_data)
all_data['user']=user
all_data=all_data.apply(lambda x: add_pmc_link(series=x),
axis=1)
return all_data
except:
raise
def get_publication_list(input_file):
'''
A method for fetching publication list and writing it to an output csv file
:param input_file: An input csv file containing 'name' and 'orcid' column
returns: A pandas dataframe containing publication info of all the users
'''
try:
final_data=pd.DataFrame()
input_data = pd.read_csv(input_file)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=['a', 'b'], )
tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with tm.assert_raises_regex(ValueError, 'level'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'level'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype(CategoricalDtype(list('cab')))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ['A', 0]) # GH 21052
def test_sort_index_multiindex(self, level):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([
[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 2],
[2, 1, 3]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[3, 4],
[1, 2]], index=expected_mi)
result = df.sort_index(level=level)
assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 3],
[2, 1, 2]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[1, 2],
[3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)),
bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])
result = model.groupby(['X1', 'X2'], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0),
(0.0, 0.5), (0.5, 3.0)],
closed='right')
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
def test_sort_index_na_position_with_categories(self):
# GH 22556
# Positioning missing value properly when column is Categorical.
categories = ['A', 'B', 'C']
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = 'first'
na_position_last = 'last'
column_name = 'c'
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices, reverse=True)
df = pd.DataFrame({
column_name: pd.Categorical(['A', np.nan, 'B', np.nan, 'C'],
categories=categories,
ordered=True)})
# sort ascending with na first
result = df.sort_values(by=column_name,
ascending=True,
na_position=na_position_first)
expected = DataFrame({
column_name: Categorical(list_of_nans + categories,
categories=categories,
ordered=True)
}, index=na_indices + category_indices)
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
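# A minimal, self-contained sketch of the assert_frame_equal call completed
# above; the two frames below are illustrative assumptions, not data from the
# test suite.
import pandas as pd
from pandas.util.testing import assert_frame_equal
left = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.0]})
right = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.0]})
assert_frame_equal(left, right)  # passes silently when values, dtypes and index all match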
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import zipfile
import os
import geopy.distance
import random
import pandas as pd
import numpy as np
import csv
from enum import Enum
from yaml import safe_load
from maro.cli.data_pipeline.utils import download_file, StaticParameter
from maro.utils.logger import CliLogger
from maro.cli.data_pipeline.base import DataPipeline, DataTopology
logger = CliLogger(name=__name__)
class CitiBikePipeline(DataPipeline):
_download_file_name = "trips.zip"
_station_info_file_name = "full_station.json"
_clean_file_name = "trips.csv"
_build_file_name = "trips.bin"
_station_meta_file_name = "station_meta.csv"
_distance_file_name = "distance_adj.csv"
_meta_file_name = "trips.yml"
def __init__(self, topology: str, source: str, station_info: str, is_temp: bool = False):
"""
Generate citi_bike data bin and other necessary files for the specified topology from specified source.
They will be generated in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file and other necessary files
/source
/_download original data files
/_clean cleaned data files
/temp download temp files
Args:
topology(str): topology name of the data files
source(str): source url of original data file
station_info(str): source url of station info file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, source, is_temp)
self._station_info = station_info
self._station_info_file = os.path.join(self._download_folder, self._station_info_file_name)
self._distance_file = os.path.join(self._build_folder, self._distance_file_name)
self._station_meta_file = os.path.join(self._build_folder, self._station_meta_file_name)
self._common_data = {}
def download(self, is_force: bool = False):
"""download the zip file"""
super().download(is_force)
self._new_file_list.append(self._station_info_file)
if (not is_force) and os.path.exists(self._station_info_file):
logger.info_green("File already exists, skipping download.")
else:
logger.info_green(f"Downloading trip data from {self._station_info} to {self._station_info_file}")
download_file(source=self._station_info, destination=self._station_info_file)
def clean(self):
"""unzip the csv file and process it for building binary file"""
super().clean()
logger.info_green("Cleaning trip data")
if os.path.exists(self._download_file):
# unzip
logger.info_green("Unzip start")
with zipfile.ZipFile(self._download_file, "r") as zip_ref:
for filename in zip_ref.namelist():
# Only one csv file is expected.
if (
filename.endswith(".csv") and
(not (filename.startswith("__MACOSX") or filename.startswith(".")))
):
logger.info_green(f"Unzip {filename} from {self._download_file}")
zip_ref.extractall(self._clean_folder, [filename])
unzip_file = os.path.join(self._clean_folder, filename)
self._new_file_list.append(unzip_file)
self._preprocess(unzipped_file=unzip_file)
break
else:
logger.warning(f"Not found downloaded trip data: {self._download_file}")
def _read_common_data(self):
"""read and full init data and existed stations"""
full_stations = None
with open(self._station_info_file, mode="r", encoding="utf-8") as station_file:
# read station to station file
raw_station_data = pd.DataFrame.from_dict(pd.read_json(station_file)["data"]["stations"])
station_data = raw_station_data.rename(columns={
"lon": "station_longitude",
"lat": "station_latitude",
"region_id": "region"})
# group by station to generate station init info
full_stations = station_data[
["station_id", "capacity", "station_longitude", "station_latitude"]
].reset_index(drop=True)
# generate station id by index
full_stations["station_id"] = pd.to_numeric(full_stations["station_id"], downcast="integer")
full_stations["capacity"] = pd.to_numeric(full_stations["capacity"], downcast="integer")
full_stations["station_longitude"] = pd.to_numeric(full_stations["station_longitude"], downcast="float")
full_stations["station_latitude"] = pd.to_numeric(full_stations["station_latitude"], downcast="float")
full_stations.drop(full_stations[full_stations["capacity"] == 0].index, axis=0, inplace=True)
full_stations.dropna(
subset=["station_id", "capacity", "station_longitude", "station_latitude"], inplace=True
)
self._common_data["full_stations"] = full_stations
self._common_data["full_station_num"] = len(self._common_data["full_stations"])
self._common_data["full_dock_num"] = self._common_data["full_stations"]["capacity"].sum()
def _read_src_file(self, file: str):
"""read and return processed rows"""
ret = []
if os.path.exists(file):
# Ignore minor encoding issues in the source file.
with open(file, "r", encoding="utf-8", errors="ignore") as fp:
ret = pd.read_csv(fp)
ret = ret[[
"tripduration", "starttime", "start station id", "end station id", "start station latitude",
"start station longitude", "end station latitude", "end station longitude", "gender", "usertype",
"bikeid"
]]
ret["tripduration"] = pd.to_numeric(
pd.to_numeric(ret["tripduration"], downcast="integer") / 60, downcast="integer"
)
ret["starttime"] = pd.to_datetime(ret["starttime"])
ret["start station id"] = pd.to_numeric(ret["start station id"], errors="coerce", downcast="integer")
ret["end station id"] = pd.to_numeric(ret["end station id"], errors="coerce", downcast="integer")
ret["start station latitude"] = pd.to_numeric(ret["start station latitude"], downcast="float")
ret["start station longitude"] = pd.to_numeric(ret["start station longitude"], downcast="float")
ret["end station latitude"] = pd.to_numeric(ret["end station latitude"], downcast="float")
ret["end station longitude"] = pd.to_numeric(ret["end station longitude"], downcast="float")
ret["bikeid"] = pd.to_numeric(ret["bikeid"], errors="coerce", downcast="integer")
ret["gender"] = pd.to_numeric(ret["gender"], errors="coerce", downcast="integer")
ret["usertype"] = ret["usertype"].apply(str).apply(
lambda x: 0 if x in ["Subscriber", "subscriber"] else 1 if x in ["Customer", "customer"] else 2
)
ret.dropna(subset=[
"start station id", "end station id", "start station latitude", "end station latitude",
"start station longitude", "end station longitude"
], inplace=True)
ret.drop(
ret[
(ret["tripduration"] <= 1) |
(ret["start station latitude"] == 0) |
(ret["start station longitude"] == 0) |
(ret["end station latitude"] == 0) |
(ret["end station longitude"] == 0)
].index,
axis=0,
inplace=True
)
ret = ret.sort_values(by="starttime", ascending=True)
return ret
def _process_src_file(self, src_data: pd.DataFrame):
used_bikes = len(src_data[["bikeid"]].drop_duplicates(subset=["bikeid"]))
trip_data = src_data[
(src_data["start station latitude"] > 40.689960) &
(src_data["start station latitude"] < 40.768334) &
(src_data["start station longitude"] > -74.019623) &
(src_data["start station longitude"] < -73.909760)
]
trip_data = trip_data[
(trip_data["end station latitude"] > 40.689960) &
(trip_data["end station latitude"] < 40.768334) &
(trip_data["end station longitude"] > -74.019623) &
(trip_data["end station longitude"] < -73.909760)
]
trip_data["start_station_id"] = trip_data["start station id"]
trip_data["end_station_id"] = trip_data["end station id"]
# get new stations
used_stations = []
used_stations.append(
trip_data[["start_station_id", "start station latitude", "start station longitude", ]].drop_duplicates(
subset=["start_station_id"]).rename(
columns={
"start_station_id": "station_id",
"start station latitude": "latitude",
"start station longitude": "longitude"
}))
used_stations.append(
trip_data[["end_station_id", "end station latitude", "end station longitude", ]].drop_duplicates(
subset=["end_station_id"]).rename(
columns={
"end_station_id": "station_id",
"end station latitude": "latitude",
"end station longitude": "longitude"
}))
in_data_station = | pd.concat(used_stations, ignore_index=True) | pandas.concat |
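# A minimal sketch of the pd.concat call completed above: stacking a list of
# frames row-wise and rebuilding a fresh integer index. Station values are
# invented for illustration.
import pandas as pd
starts = pd.DataFrame({"station_id": [1, 2], "latitude": [40.70, 40.71]})
ends = pd.DataFrame({"station_id": [2, 3], "latitude": [40.71, 40.72]})
stations = pd.concat([starts, ends], ignore_index=True)
print(stations)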
import boto3
import json
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from l4ecwcw import *
from io import StringIO
from matplotlib import gridspec
# Mandatory to ensure text is rendered in SVG plots:
matplotlib.rcParams['svg.fonttype'] = 'none'
client = boto3.client('lookoutequipment')
dpi = 100
def get_predictions(event, context):
model_name = event['model_name']
widget_context = event['widgetContext']
width = widget_context['width']
# Height taking into account the height of the tag selection form:
height = int(widget_context['height']) - 50
tags_list = get_tags_list(model_name)
tag = get_selected_tag(widget_context)
if tag is None:
tag = tags_list[0]
svg = get_model_evaluations_infos(model_name, width, height, tag)
html = build_tag_selection_form(event, context, tags_list, tag)
html = html + f'<div>{svg}</div>'
return html
def get_tags_list(model_name):
model_response = client.describe_model(ModelName=model_name)
predictions = json.loads(model_response['ModelMetrics'])['predicted_ranges']
diagnostics = predictions[0]['diagnostics']
tags_list = [d['name'].split('\\')[-1] for d in diagnostics]
return tags_list
def get_selected_tag(widget_context):
print(widget_context['forms']['all'])
if len(widget_context['forms']['all']) > 0:
return widget_context['forms']['all']['tags']
else:
return None
def get_model_evaluations_infos(model_name, width, height, tag):
model_response = client.describe_model(ModelName=model_name)
predictions = json.loads(model_response['ModelMetrics'])['predicted_ranges']
start_date = pd.to_datetime(model_response['EvaluationDataStartTime']).tz_localize(None)
end_date = | pd.to_datetime(model_response['EvaluationDataEndTime']) | pandas.to_datetime |
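# A small sketch of the pd.to_datetime pattern completed above: parse an
# ISO-8601 timestamp and drop the timezone, mirroring the start-date line.
# The timestamp strings are assumptions, not real API output.
import pandas as pd
start_date = pd.to_datetime("2023-01-01T00:00:00Z").tz_localize(None)
end_date = pd.to_datetime("2023-02-01T00:00:00Z").tz_localize(None)
print(start_date, end_date)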
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
###############
# for first-time use, uncomment this
#
# read in data
data=pd.read_csv('VS_Extensions_1week_correct.csv')
data=data.drop(['MacAddressHash1'], axis=1)
# now we need to parse out Extensions Used
df=data.groupby('MacAddressHash').agg(lambda x: ' , '.join(set(x))).reset_index()
df2 = df.ExtensionsUsed.str.split(' , ', expand=True)
df2 = pd.get_dummies(df2, prefix='', prefix_sep='')
#
dfpre = df2.groupby(by=df2.columns, axis=1).max()
#df2 = dfpre.groupby(dfpre.columns, axis=1).sum()
newdf= | pd.concat([df, dfpre], axis=1) | pandas.concat |
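# A compact sketch of the column-wise pd.concat completed above: pasting a
# dummy-indicator frame next to the original rows. Column names are invented.
import pandas as pd
df = pd.DataFrame({"MacAddressHash": ["a", "b"]})
dummies = pd.DataFrame({"ext_python": [1, 0], "ext_csharp": [0, 1]})
newdf = pd.concat([df, dummies], axis=1)
print(newdf)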
import xlrd
import os
import pandas as pd
os.chdir('/Users/zhengzhiheng/PycharmProjects/untitled3')
wordbook = xlrd.open_workbook('test.xlsx')
sheet_name = wordbook.sheet_names()
print(sheet_name)
lst = pd.read_excel('test.xlsx', sheet_name=0)
lst2 = pd.read_excel('test.xlsx', sheet_name=1)
print(lst.head(5))
# ignore_index resets the index
print( | pd.concat([lst, lst2], axis=0, ignore_index=True) | pandas.concat |
import re
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from aneris.convenience import harmonise_all
from aneris.errors import (
AmbiguousHarmonisationMethod,
MissingHarmonisationYear,
MissingHistoricalError,
)
pytest.importorskip("pint")
import pint.errors
@pytest.mark.parametrize(
"method,exp_res",
(
(
"constant_ratio",
{
2010: 10 * 1.1,
2030: 5 * 1.1,
2050: 3 * 1.1,
2100: 1 * 1.1,
},
),
(
"reduce_ratio_2050",
{
2010: 11,
2030: 5 * 1.05,
2050: 3,
2100: 1,
},
),
(
"reduce_ratio_2030",
{
2010: 11,
2030: 5,
2050: 3,
2100: 1,
},
),
(
"reduce_ratio_2150",
{
2010: 11,
2030: 5 * (1 + 0.1 * (140 - 20) / 140),
2050: 3 * (1 + 0.1 * (140 - 40) / 140),
2100: 1 * (1 + 0.1 * (140 - 90) / 140),
},
),
(
"constant_offset",
{
2010: 10 + 1,
2030: 5 + 1,
2050: 3 + 1,
2100: 1 + 1,
},
),
(
"reduce_offset_2050",
{
2010: 11,
2030: 5 + 0.5,
2050: 3,
2100: 1,
},
),
(
"reduce_offset_2030",
{
2010: 11,
2030: 5,
2050: 3,
2100: 1,
},
),
(
"reduce_offset_2150",
{
2010: 11,
2030: 5 + 1 * (140 - 20) / 140,
2050: 3 + 1 * (140 - 40) / 140,
2100: 1 + 1 * (140 - 90) / 140,
},
),
(
"model_zero",
{
2010: 10 + 1,
2030: 5 + 1,
2050: 3 + 1,
2100: 1 + 1,
},
),
(
"hist_zero",
{
2010: 10,
2030: 5,
2050: 3,
2100: 1,
},
),
),
)
def test_different_unit_handling(method, exp_res):
idx = ["variable", "unit", "region", "model", "scenario"]
hist = pd.DataFrame(
{
"variable": ["Emissions|CO2"],
"unit": ["MtC / yr"],
"region": ["World"],
"model": ["CEDS"],
"scenario": ["historical"],
2010: [11000],
}
).set_index(idx)
scenario = pd.DataFrame(
{
"variable": ["Emissions|CO2"],
"unit": ["GtC / yr"],
"region": ["World"],
"model": ["IAM"],
"scenario": ["abc"],
2010: [10],
2030: [5],
2050: [3],
2100: [1],
}
).set_index(idx)
overrides = [{"variable": "Emissions|CO2", "method": method}]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenario,
history=hist,
harmonisation_year=2010,
overrides=overrides,
)
for year, val in exp_res.items():
npt.assert_allclose(res[year], val)
@pytest.fixture()
def hist_df():
idx = ["variable", "unit", "region", "model", "scenario"]
hist = pd.DataFrame(
{
"variable": ["Emissions|CO2", "Emissions|CH4"],
"unit": ["MtCO2 / yr", "MtCH4 / yr"],
"region": ["World"] * 2,
"model": ["CEDS"] * 2,
"scenario": ["historical"] * 2,
2010: [11000 * 44 / 12, 200],
2015: [12000 * 44 / 12, 250],
2020: [13000 * 44 / 12, 300],
}
).set_index(idx)
return hist
@pytest.fixture()
def scenarios_df():
idx = ["variable", "unit", "region", "model", "scenario"]
scenario = pd.DataFrame(
{
"variable": ["Emissions|CO2", "Emissions|CH4"],
"unit": ["GtC / yr", "GtCH4 / yr"],
"region": ["World"] * 2,
"model": ["IAM"] * 2,
"scenario": ["abc"] * 2,
2010: [10, 0.1],
2015: [11, 0.15],
2020: [5, 0.25],
2030: [5, 0.1],
2050: [3, 0.05],
2100: [1, 0.03],
}
).set_index(idx)
return scenario
@pytest.mark.parametrize("extra_col", (False, "mip_era"))
@pytest.mark.parametrize(
"harmonisation_year,scales",
(
(2010, [1.1, 2]),
(2015, [12 / 11, 25 / 15]),
),
)
def test_different_unit_handling_multiple_timeseries_constant_ratio(
hist_df,
scenarios_df,
extra_col,
harmonisation_year,
scales,
):
if extra_col:
scenarios_df[extra_col] = "test"
scenarios_df = scenarios_df.set_index(extra_col, append=True)
exp = scenarios_df.multiply(scales, axis=0)
overrides = [{"method": "constant_ratio"}]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=harmonisation_year,
overrides=overrides,
)
pdt.assert_frame_equal(res, exp)
@pytest.mark.parametrize(
"harmonisation_year,offset",
(
(2010, [1, 0.1]),
(2015, [1, 0.1]),
(2020, [8, 0.05]),
),
)
def test_different_unit_handling_multiple_timeseries_constant_offset(
hist_df,
scenarios_df,
harmonisation_year,
offset,
):
exp = scenarios_df.add(offset, axis=0)
overrides = [{"method": "constant_offset"}]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=harmonisation_year,
overrides=overrides,
)
pdt.assert_frame_equal(res, exp)
def test_different_unit_handling_multiple_timeseries_overrides(
hist_df,
scenarios_df,
):
harmonisation_year = 2015
exp = scenarios_df.sort_index()
for r in exp.index:
for c in exp:
if "CO2" in r[0]:
harm_year_ratio = 12 / 11
if c >= 2050:
sf = 1
elif c <= 2015:
# this custom pre-harmonisation year logic doesn't apply to
# offsets which seems surprising
sf = harm_year_ratio
else:
sf = 1 + (
(harm_year_ratio - 1) * (2050 - c) / (2050 - harmonisation_year)
)
exp.loc[r, c] *= sf
else:
harm_year_offset = 0.1
if c >= 2030:
of = 0
else:
of = harm_year_offset * (2030 - c) / (2030 - harmonisation_year)
exp.loc[r, c] += of
overrides = [
{"variable": "Emissions|CO2", "method": "reduce_ratio_2050"},
{"variable": "Emissions|CH4", "method": "reduce_offset_2030"},
]
overrides = pd.DataFrame(overrides)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=harmonisation_year,
overrides=overrides,
)
pdt.assert_frame_equal(res, exp, check_like=True)
def test_raise_if_variable_not_in_hist(hist_df, scenarios_df):
hist_df = hist_df[~hist_df.index.get_level_values("variable").str.endswith("CO2")]
error_msg = re.escape("No historical data for `World` `Emissions|CO2`")
with pytest.raises(MissingHistoricalError, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_region_not_in_hist(hist_df, scenarios_df):
hist_df = hist_df[~hist_df.index.get_level_values("region").str.startswith("World")]
error_msg = re.escape("No historical data for `World` `Emissions|CH4`")
with pytest.raises(MissingHistoricalError, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_incompatible_unit(hist_df, scenarios_df):
scenarios_df = scenarios_df.reset_index("unit")
scenarios_df["unit"] = "Mt CO2 / yr"
scenarios_df = scenarios_df.set_index("unit", append=True)
error_msg = re.escape(
"Cannot convert from 'megatCH4 / a' ([mass] * [methane] / [time]) to "
"'CO2 * megametric_ton / a' ([carbon] * [mass] / [time])"
)
with pytest.raises(pint.errors.DimensionalityError, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_undefined_unit(hist_df, scenarios_df):
scenarios_df = scenarios_df.reset_index("unit")
scenarios_df["unit"] = "Mt CO2eq / yr"
scenarios_df = scenarios_df.set_index("unit", append=True)
with pytest.raises(pint.errors.UndefinedUnitError):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2010,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_harmonisation_year_missing(hist_df, scenarios_df):
hist_df = hist_df.drop(2015, axis="columns")
error_msg = re.escape(
"No historical data for year 2015 for `World` `Emissions|CH4`"
)
with pytest.raises(MissingHarmonisationYear, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2015,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_raise_if_harmonisation_year_nan(hist_df, scenarios_df):
hist_df.loc[
hist_df.index.get_level_values("variable").str.endswith("CO2"), 2015
] = np.nan
error_msg = re.escape(
"Historical data is null for year 2015 for `World` `Emissions|CO2`"
)
with pytest.raises(MissingHarmonisationYear, match=error_msg):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2015,
overrides=pd.DataFrame([{"method": "constant_ratio"}]),
)
def test_override_multi_level(hist_df, scenarios_df):
asia_hist = hist_df * 0.7
asia_hist.index = asia_hist.index.set_levels(["World|R5.2ASIA"], level="region")
hist_df = pd.concat([hist_df, asia_hist])
asia = scenarios_df.copy()
asia.index = asia.index.set_levels(["World|R5.2ASIA"], level="region")
model_2 = scenarios_df.copy()
model_2.index = model_2.index.set_levels(["FaNCY"], level="model")
scenario_2 = scenarios_df.copy()
scenario_2.index = scenario_2.index.set_levels(["EMF33 quick"], level="scenario")
scenarios_df = pd.concat([scenarios_df, asia, model_2, scenario_2])
overrides = pd.DataFrame(
[
{
"variable": "Emissions|CO2",
"region": "World",
"model": "IAM",
"scenario": "abc",
"method": "constant_ratio",
},
{
"variable": "Emissions|CH4",
"region": "World",
"model": "IAM",
"scenario": "abc",
"method": "constant_offset",
},
{
"variable": "Emissions|CO2",
"region": "World|R5.2ASIA",
"model": "IAM",
"scenario": "abc",
"method": "reduce_ratio_2030",
},
{
"variable": "Emissions|CH4",
"region": "World|R5.2ASIA",
"model": "IAM",
"scenario": "abc",
"method": "reduce_ratio_2050",
},
{
"variable": "Emissions|CO2",
"region": "World",
"model": "FaNCY",
"scenario": "abc",
"method": "reduce_ratio_2070",
},
{
"variable": "Emissions|CH4",
"region": "World",
"model": "FaNCY",
"scenario": "abc",
"method": "reduce_ratio_2090",
},
{
"variable": "Emissions|CO2",
"region": "World",
"model": "IAM",
"scenario": "EMF33 quick",
"method": "reduce_offset_2050",
},
{
"variable": "Emissions|CH4",
"region": "World",
"model": "IAM",
"scenario": "EMF33 quick",
"method": "reduce_offset_2070",
},
]
)
res = harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2015,
overrides=overrides,
)
co2_rows = res.index.get_level_values("variable") == "Emissions|CO2"
world_rows = res.index.get_level_values("region") == "World"
fancy_rows = res.index.get_level_values("model") == "FaNCY"
emf33_rows = res.index.get_level_values("scenario") == "EMF33 quick"
atol = 1e-4
pick_rows = co2_rows & world_rows & ~fancy_rows & ~emf33_rows
npt.assert_allclose(
res.loc[pick_rows, :],
12 / 11 * scenarios_df.loc[pick_rows, :],
atol=atol,
)
npt.assert_allclose(
res.loc[~co2_rows & world_rows & ~fancy_rows & ~emf33_rows, :],
0.1 + scenarios_df.loc[~co2_rows & world_rows & ~fancy_rows & ~emf33_rows, :],
atol=atol,
)
npt.assert_allclose(
res.loc[co2_rows & ~world_rows & ~fancy_rows & ~emf33_rows, :].squeeze(),
[7.636363, 8.4, 4.21212121, 5, 3, 1],
atol=atol,
)
npt.assert_allclose(
res.loc[~co2_rows & ~world_rows & ~fancy_rows & ~emf33_rows, :].squeeze(),
[0.11667, 0.175, 0.285714, 0.109524, 0.05, 0.03],
atol=atol,
)
npt.assert_allclose(
res.loc[co2_rows & world_rows & fancy_rows & ~emf33_rows, :].squeeze(),
[10.909090, 12, 5.413233, 5.330579, 3.099174, 1],
atol=atol,
)
npt.assert_allclose(
res.loc[~co2_rows & world_rows & fancy_rows & ~emf33_rows, :].squeeze(),
[0.16667, 0.25, 0.405555, 0.15333, 0.067777, 0.03],
atol=atol,
)
npt.assert_allclose(
res.loc[co2_rows & world_rows & ~fancy_rows & emf33_rows, :].squeeze(),
[11.142857, 12, 5.857143, 5.571429, 3, 1],
atol=atol,
)
npt.assert_allclose(
res.loc[~co2_rows & world_rows & ~fancy_rows & emf33_rows, :].squeeze(),
[0.2090909, 0.25, 0.340909, 0.172727, 0.086364, 0.03],
atol=atol,
)
@pytest.mark.parametrize(
"overrides",
(
pd.DataFrame(
[
{"region": "World", "method": "constant_ratio"},
{"region": "World", "method": "constant_offset"},
]
),
pd.DataFrame(
[
{
"region": "World",
"variable": "Emissions|CH4",
"method": "constant_ratio",
},
{"region": "World", "method": "constant_offset"},
]
),
pd.DataFrame(
[
{"variable": "Emissions|CH4", "method": "constant_ratio"},
{"variable": "Emissions|CH4", "method": "reduce_offset_2030"},
]
),
pd.DataFrame(
[
{"variable": "Emissions|CH4", "method": "constant_ratio"},
{
"variable": "Emissions|CH4",
"model": "IAM",
"method": "reduce_offset_2030",
},
]
),
),
)
def test_multiple_matching_overrides(hist_df, scenarios_df, overrides):
with pytest.raises(
AmbiguousHarmonisationMethod, match="More than one override for metadata"
):
harmonise_all(
scenarios=scenarios_df,
history=hist_df,
harmonisation_year=2015,
overrides=overrides,
)
def test_defaults(hist_df, scenarios_df):
co2_afolu = scenarios_df[
scenarios_df.index.get_level_values("variable") == "Emissions|CO2"
].copy()
co2_afolu = co2_afolu.reset_index()
co2_afolu["variable"] = "Emissions|CO2|AFOLU"
co2_afolu = co2_afolu.set_index(scenarios_df.index.names)
co2_afolu.iloc[:, :] = [2, 0.5, -1, -1.5, -2, -3]
bc_afolu = scenarios_df[
scenarios_df.index.get_level_values("variable") == "Emissions|CO2"
].copy()
bc_afolu = bc_afolu.reset_index()
bc_afolu["variable"] = "Emissions|BC|AFOLU"
bc_afolu["unit"] = "Mt BC / yr"
bc_afolu = bc_afolu.set_index(scenarios_df.index.names)
bc_afolu.iloc[:, :] = [30, 33, 40, 42, 36, 24]
scenarios_df = pd.concat([scenarios_df, co2_afolu, bc_afolu])
co2_afolu_hist = hist_df[
hist_df.index.get_level_values("variable") == "Emissions|CO2"
].copy()
co2_afolu_hist = co2_afolu_hist.reset_index()
co2_afolu_hist["variable"] = "Emissions|CO2|AFOLU"
co2_afolu_hist = co2_afolu_hist.set_index(hist_df.index.names)
co2_afolu_hist.iloc[:, :] = [
1.5 * 44000 / 12,
1.6 * 44000 / 12,
1.7 * 44000 / 12,
]
bc_afolu_hist = hist_df[
hist_df.index.get_level_values("variable") == "Emissions|CO2"
].copy()
bc_afolu_hist = bc_afolu_hist.reset_index()
bc_afolu_hist["variable"] = "Emissions|BC|AFOLU"
bc_afolu_hist["unit"] = "Gt BC / yr"
bc_afolu_hist = bc_afolu_hist.set_index(hist_df.index.names)
bc_afolu_hist.iloc[:, :] = [20, 35, 28]
hist_df = | pd.concat([hist_df, co2_afolu_hist, bc_afolu_hist]) | pandas.concat |
from __future__ import division
import pandas as pd
def merge_subunits(genes):
""" Merge list of protein subunit genes into complex
Args:
genes (pandas.Series): list of genes
Returns:
str: boolean rule
"""
genes = genes.dropna()
if len(genes) == 0:
return None
else:
protein = ' and '.join(sorted(genes))
if len(genes) > 1:
return '(' + protein + ')'
else:
return protein
def merge_subunit_scores(scores):
""" Merge scores of all genes in a protein complex.
Calculates the mean score among all subunits.
Args:
scores: individual gene scores
Returns:
float: merged score
"""
return scores.fillna(0).mean()
def merge_proteins(proteins):
""" Merge all isozymes that catalyze a given reaction.
Automatically removes all isozymes with missing score.
Args:
proteins (pandas.Series): list of proteins
Returns:
str: boolean rule
"""
proteins = set(proteins.dropna())
if not proteins:
return None
gpr_str = ' or '.join(sorted(proteins))
if len(proteins) > 1:
return '(' + gpr_str + ')'
else:
return gpr_str
def merge_protein_scores(scores):
""" Merge scores of all isozymes that catalyze a given reaction.
Calculates the maximum score among all isozymes.
Args:
scores (pandas.Series): protein scores
Returns:
float: merged score
"""
return scores.max(skipna=True)
def reaction_scoring(annotation, gprs, spontaneous_score=0.0, debug_output=None):
""" Calculate reaction scores using new eggnog output.
Args:
annotation (pandas.DataFrame): gene annotation results
gprs (pandas.DataFrame): BiGG GPR rules
spontaneous_score (float): score to give to spontaneous reactions (default: 0.0)
Returns:
pandas.DataFrame: reaction scores
"""
# filter best match for each gene
gene2gene = annotation.sort_values(by='score', ascending=False) \
.groupby('BiGG_gene', as_index=False).apply(lambda x: x.iloc[0])
# merge with gpr table
gprs['BiGG_gene'] = gprs.apply(lambda row: '{}.{}'.format(row['model'], row['gene'][2:]), axis=1)
gene_scores = | pd.merge(gene2gene, gprs, how='right') | pandas.merge |
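# A minimal sketch of the pd.merge call completed above: a right join on the
# shared "BiGG_gene" column that keeps every GPR row. Values are invented.
import pandas as pd
gene2gene = pd.DataFrame({"BiGG_gene": ["m.g1"], "score": [0.9]})
gprs = pd.DataFrame({"BiGG_gene": ["m.g1", "m.g2"], "reaction": ["R1", "R2"]})
gene_scores = pd.merge(gene2gene, gprs, how="right")
print(gene_scores)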
"""
Test our groupby support based on the pandas groupby tests.
"""
#
# This file is licensed under the Pandas 3 clause BSD license.
#
from sparklingpandas.test.sp_test_case import \
SparklingPandasTestCase
from pandas import bdate_range
from pandas.core.index import Index, MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import assert_frame_equal
from pandas import compat
import pandas.util.testing as tm
import unittest2
import numpy as np
try:
# rands was moved to util.testing in pandas 0.15
from pandas.core.common import rands # pylint: disable=no-name-in-module
except ImportError:
from pandas.util.testing import rands
class PandasGroupby(SparklingPandasTestCase):
def setUp(self):
"""
Setup the dataframes used for the groupby tests derived from pandas
"""
self.date_rng = bdate_range('1/1/2005', periods=250)
self.string_idx = Index([rands(8).upper() for x in range(250)])
self.group_id = Series([x[0] for x in self.string_idx],
index=self.string_idx)
self.group_dict = dict((key, value) for key, value in
compat.iteritems(self.group_id))
self.col_idx = Index(['A', 'B', 'C', 'D', 'E'])
rand_matrix = np.random.randn(250, 5)
self.string_matrix = DataFrame(rand_matrix, columns=self.col_idx,
index=self.string_idx)
self.time_matrix = DataFrame(rand_matrix, columns=self.col_idx,
index=self.date_rng)
self.time_series = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.pd_df_foobar = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(np.random.randn(8),
dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
super(self.__class__, self).setUp()
def test_first_last_nth(self):
# tests for first / last / nth
ddf = self.psc.from_pd_data_frame(self.pd_df_foobar)
assert_frame_equal(ddf.collect(), self.pd_df_foobar)
grouped = self.psc.from_pd_data_frame(self.pd_df_foobar).groupby('A')
first = grouped.first().collect()
expected = self.pd_df_foobar.ix[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0).collect()
assert_frame_equal(nth, expected)
last = grouped.last().collect()
expected = self.pd_df_foobar.ix[[5, 7], ['B', 'C', 'D']]
expected.index = | Index(['bar', 'foo'], name='A') | pandas.core.index.Index |
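# A short sketch of building a named Index as in the completion above; the
# labels and name mirror the test but stand alone here.
from pandas import Index
idx = Index(['bar', 'foo'], name='A')
print(idx.name, list(idx))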
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
import matplotlib
import os
import sys
import appaloosa
import pandas as pd
import datetime
import warnings
from scipy.optimize import curve_fit, minimize
from astropy.stats import funcs
import emcee
import corner
import appaloosa.analysis as analysis
def paper2_plots(condorfile='condorout.dat.gz', debug=False,
kicfile='kic.txt.gz', statsfile='stats.txt',
figdir='figures2/', figtype='.pdf', rerun=False, oldplot=True):
'''
Paper 2: flares vs ages
PREVIOUSLY: Run on WWU workstation in dir: ~/research/kepler-flares/
NOW: Run on UW Workstation in dir: /Volumes/Davenport WWU-1/Data/WWU_20180618/research/kepler-flares/
'''
# if doing the re-run (make FFD for all objects) then do all the old extra plots too
if rerun:
oldplot = True
# read in KIC file
# http://archive.stsci.edu/pub/kepler/catalogs/ <- data source
# http://archive.stsci.edu/kepler/kic10/help/quickcol.html <- info
print('RUNNING PAPER2_PLOTS')
print('reading in ', datetime.datetime.now())
kicdata = | pd.read_csv(kicfile, delimiter='|') | pandas.read_csv |
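# A self-contained sketch of reading a pipe-delimited table as in the
# completion above; the KIC-style columns and values are invented.
import pandas as pd
from io import StringIO
raw = "kic_id|teff|logg\n757076|5164|4.6\n757099|5521|4.4\n"
kicdata = pd.read_csv(StringIO(raw), delimiter='|')
print(kicdata.head())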
import pandas as pd
def merge_data(left_df, right_df, date_col="date"):
# get clean copies of data with date format
left_df = left_df.copy()
left_df[date_col] = | pd.to_datetime(left_df[date_col]) | pandas.to_datetime |
import sys
import numpy
import pandas as pd
import constants as kk
from pyswarm import pso
import os
import input
import datetime as dt
def preparation():
project_path = 'C:\\Users\\FrancescoBaldi\\switchdrive\\Work in progress\\Paper 0\\Ecos2015PaperExtension\\'
path_files = project_path + os.sep
sys.path.append(path_files)
filenames = input.filenames(project_path) # Note: this is just a test
CONSTANTS = kk.constantsSetting()
CONSTANTS["filenames"] = filenames
processed_temp = pd.read_hdf(CONSTANTS["filenames"]["dataset_output"], 'processed')
dataset_raw = pd.read_hdf(filenames["dataset_raw"], 'table')
data = | pd.DataFrame(index=processed_temp.index) | pandas.DataFrame |
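# A brief sketch of the pd.DataFrame(index=...) pattern completed above:
# allocate an empty frame on an existing index and fill columns later. The
# hourly index here is a stand-in for the processed dataset's index.
import pandas as pd
idx = pd.date_range("2014-01-01", periods=3, freq="H")
data = pd.DataFrame(index=idx)
data["measured"] = 0.0
print(data)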
import argparse
from ast import literal_eval
from astropy.io import fits
import base64
from bson.json_util import loads
import confluent_kafka
from copy import deepcopy
import datetime
import fastavro
import gzip
import io
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import os
import pandas as pd
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import subprocess
import sys
from tensorflow.keras.models import load_model
import time
import traceback
from utils import (
datetime_to_jd,
deg2dms,
deg2hms,
great_circle_distance,
in_ellipse,
load_config,
Mongo,
radec2lb,
time_stamp,
)
''' load config and secrets '''
config = load_config(config_file='config.yaml')['kowalski']
DEFAULT_TIMEOUT = 5 # seconds
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.timeout = DEFAULT_TIMEOUT
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
def read_schema_data(bytes_io):
"""Read data that already has an Avro schema.
Parameters
----------
bytes_io : `_io.BytesIO`
Data to be decoded.
Returns
-------
`dict`
Decoded data.
"""
bytes_io.seek(0)
message = fastavro.reader(bytes_io)
return message
class EopError(Exception):
"""
Exception raised when reaching end of partition.
Parameters
----------
msg : Kafka message
The Kafka message result from consumer.poll().
"""
def __init__(self, msg):
message = f'{time_stamp()}: topic:{msg.topic()}, partition:{msg.partition()}, '\
f'status:end, offset:{msg.offset()}, key:{str(msg.key())}\n'
self.message = message
def __str__(self):
return self.message
def log(message):
print(f"{time_stamp()}: {message}")
def make_photometry(alert: dict, jd_start: float = None):
"""Make a de-duplicated pandas.DataFrame with photometry of alert['objectId']
:param alert: ZTF alert packet/dict
:param jd_start:
:return:
"""
alert = deepcopy(alert)
df_candidate = pd.DataFrame(alert['candidate'], index=[0])
df_prv_candidates = | pd.DataFrame(alert['prv_candidates']) | pandas.DataFrame |
from collections import Counter
from importlib.machinery import SourceFileLoader
import numpy as np
from os.path import join
import warnings
warnings.filterwarnings("ignore")
import nltk
nltk.download('punkt')
import seaborn as sns
import matplotlib
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import *
from nltk.corpus import stopwords
nltk.download('stopwords', quiet=True)
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.metrics import multilabel_confusion_matrix
from sklearn import metrics
import gdown
import string
import numpy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.layers import Dropout
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
STOPWORDS = set(stopwords.words('english'))
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import string
def get_finance_train():
df_train = pd.read_csv("finance_train.csv")
return df_train
def get_finance_test():
df_test = pd.read_csv("finance_test.csv")
return df_test
PUNCTUATION = '!#$%&()*,-./:;<=>?@^_`{|}~'
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
STOPWORDS = set(stopwords.words('english'))
def clean_text(text):
"""
text: a string
return: modified initial string
"""
text = text.lower()
text = REPLACE_BY_SPACE_RE.sub(' ', text)
text = BAD_SYMBOLS_RE.sub('', text)
text = text.replace('x', '')
text = ' '.join(word for word in text.split() if word not in STOPWORDS)
return text
def pad_sequences_train(df_train, df_test):
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(df_train['Sentence'].values)
word_index = tokenizer.word_index
X = tokenizer.texts_to_sequences(df_train['Sentence'].values)
X_train = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
return X_train
def pad_sequences_test(df_train, df_test):
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(df_train['Sentence'].values)
word_index = tokenizer.word_index
X = tokenizer.texts_to_sequences(df_test['Sentence'].values)
X_test = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
return X_test
def run_model(X_train, y_train, X_test, y_test, epochs=5, max_sequence_length=256, max_nb_words=1000, embedding_dim=300):
if any(x is None for x in [X_train, y_train, X_test, y_test, epochs, max_sequence_length, max_nb_words, embedding_dim]):
print('Replace the None values above with your new values before calling the run_model() function.')
return None, None, None
model = Sequential()
model.add(Embedding(max_nb_words+1, embedding_dim, mask_zero=True, input_length=max_sequence_length))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(n_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
history = model.fit(X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=0.1,
callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
test_loss, test_accuracy = model.evaluate(X_test, y_test)
return model, history, test_accuracy
EMBEDDING_DIM = 300
nltk.download('wordnet')
gdown.download('https://drive.google.com/uc?id=1q4U2gVY9tWEPdT6W-pdQpKmo152QqWLE', 'finance_train.csv', True)
gdown.download('https://drive.google.com/uc?id=1nIBqAsItwVEGVayYTgvybz7HeK0asom0', 'finance_test.csv', True)
print ("Train & Test Files are loaded")
df_train = get_finance_train()
df_train.head()
df_test = get_finance_test()
print(df_test)
n_labels = 3
label_map = {0 : "negative",
1 : "neutral",
2 : "positive"}
model = Sequential()
model.add(Embedding(MAX_NB_WORDS+1, EMBEDDING_DIM, mask_zero=True, input_length=MAX_SEQUENCE_LENGTH))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(n_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
epochs = 8
batch_size = 32
X_train= pad_sequences_train(df_train, df_test)
print(X_train[0])
X_test= pad_sequences_test(df_test, df_test)
print(X_test[0])
y_train = | pd.get_dummies(df_train['Label']) | pandas.get_dummies |
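# A minimal sketch of the pd.get_dummies call completed above: one-hot
# encoding an assumed three-class label column.
import pandas as pd
labels = pd.DataFrame({"Label": ["negative", "neutral", "positive", "neutral"]})
y = pd.get_dummies(labels["Label"])
print(y)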
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
        # a mix of nan and None results in 'mixed'
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
        assert lib.infer_dtype(arr) == 'datetime'
from datetime import datetime
import unittest
import numpy as np
import pandas.core.datetools as datetools
from pandas.core.daterange import DateRange, XDateRange
####
## XDateRange Tests
####
def eqXDateRange(kwargs, expected):
assert(np.array_equal(list(XDateRange(**kwargs)), expected))
def testXDateRange1():
eqXDateRange(dict(start = datetime(2009, 3, 25),
nPeriods = 2),
[datetime(2009, 3, 25), datetime(2009, 3, 26)])
def testXDateRange2():
eqXDateRange(dict(start = datetime(2008, 1, 1),
end = datetime(2008, 1, 3)),
[datetime(2008, 1, 1),
datetime(2008, 1, 2),
datetime(2008, 1, 3)])
def testXDateRange3():
eqXDateRange(dict(start = datetime(2008, 1, 5),
end = datetime(2008, 1, 6)),
[])
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestXDateRange(unittest.TestCase):
def test_constructor(self):
rng = XDateRange(START, END, offset=datetools.bday)
self.assertEquals(rng.timeRule, 'WEEKDAY')
rng = XDateRange(START, END, timeRule='WEEKDAY')
self.assertEquals(rng.offset, datetools.bday)
class TestDateRange(unittest.TestCase):
def setUp(self):
self.rng = DateRange(START, END, offset=datetools.bday)
def test_constructor(self):
rng = DateRange(START, END, offset=datetools.bday)
        rng = DateRange(START, periods=20, offset=datetools.bday)
# Copyright (c) <NAME>
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import chronos_utils
import pandas as pd
import numpy as np
import torch
from torch.optim import Rprop
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.optim import ExponentialLR
from pyro.infer import SVI, Trace_ELBO, Predictive, JitTrace_ELBO
from pyro.infer.autoguide import AutoDelta
from pyro.infer.autoguide.initialization import init_to_feasible
import warnings
pyro.enable_validation(True)
class Chronos:
'''
Parameters:
------------
method="MAP" - [str] The estimation method used. Currently only
supports one of "MAP" (Maximum A Posteriori), or "MLE"
(Maximum Likelihood Estimation). If "MLE" is chosen,
'changepoint_prior_scale' is ignored.
Default is "MAP"
n_changepoints - [int] The number of changepoints the model considers
when fitting to the data, must be 0 or larger.
Changepoints are points in time when the slope of the
trend can change. More changepoints will allow for a
better fit, but will also increase uncertainty when
predicting into the future.
Default is 20
year_seasonality_order - [int] The fourier order used to predict yearly
seasonality. Must be 0 or larger. Larger values will
allow for a better fit for yearly seasonality but
increase the odds of overfitting, as well as fitting
time. Setting this value to 0 implies there is no
yearly seasonality.
Default is 10
month_seasonality_order - [int] The fourier order used to predict monthly
seasonality. Must be 0 or larger. Larger values
will allow for a better fit for monthly seasonality
but increase the odds of overfitting, as well as
fitting time. Setting this value to 0 implies there
is no monthly seasonality.
Default is 5
weekly_seasonality_order - [int] The fourier order used to predict weekly
seasonality. Must be 0 or larger. Larger values will
allow for a better fit for weekly seasonality but
increase the odds of overfitting, as well as fitting
time. Setting this value to 0 implies there is no
weekly seasonality.
Default is 3
learning_rate - [float] The learning rate used for optimization when
the optimization method is "MAP" or "MLE". Most be
larger than 0. Larger values make the algorithm learn
faster but might produce worse solutions. Smaller
values allow for better convergence but will require
more iterations to be specified at [max_iter].
Default is 0.01
changepoint_range - [float] The range of the historical data to apply
changepoints to. Must be between 0.0 - 1.0. 0.8
would mean only considering changepoints for the
first 80% of historical data. Larger values would
provide better fit, but would also be more
sensitive to recent changes which may or may not
indicate trends.
Default is 0.8
changepoint_prior_scale - [float] the scale for the changepoint value
prior distribution. Must be larger than 0.0. The
changepoints are assumed to come from a Laplace
distribution which is centered at 0 and is specified
by the scale value. Larger values for the scale
allow for more changepoints with larger changes, and
may increase fit, but will also increase the
uncertainty in future predictions.
Default is 0.05
distribution - [string] The distribution which describes the
behaviour of the data (mainly of the residuals)
at each timestamp. Supported distributions are:
"Normal" - The normal (Guassian) distribution
"StudentT" - Student's t-distribution. Unlike the
normal distribution it has fatter
                                              tails, so it can be more resistant to
outliers
"Gamma" - The gamma distribution. Only has
support for positive values.
Default is "Normal"
seasonality_mode - [string] Whether seasonality is an additive quantity
("add") or a multiplicative one ("mul"). If
seasonality is specified as "add", the seasonal
components are added to the trend. For example, if
                                every Saturday you see 5 additional clients, that
seasonal component is additive. If the seasonality
is specified as "mul", the seasonal components are
multiplied by the trend. For example, if every
Saturday you see a 50% increase in clients, that
seasonal component is multiplicative as it depends
on the number of clients already expected.
Default is "add"
max_iter - [int] The maximum number of iterations the algorithm
is allowed to run for. Must be larger than 0. Chronos
                                employs an optimization-based approach for the "MAP"
and "MLE" method, and this parameter determines how
long the optimization algorithm can run for. Larger
values will increase run-time, but will lead to better
results. Smaller learning_rate values require larger
max_iter values.
Default is 1000.
Example Usage:
----------------
# Create sample data
>>> import chronos_plotting
>>> from chronos import Chronos
>>> x = np.array(range(365*4))
>>> my_df = pd.DataFrame({"ds": pd.date_range(start="2016-01-01",
periods=365*4,
freq='d'),
"y": 0.01 * x + np.sin(x/30)})
>>> print(my_df.head())
ds y
0 2016-01-01 0.000000
1 2016-01-02 0.043327
2 2016-01-03 0.086617
3 2016-01-04 0.129833
4 2016-01-05 0.172939
>>> my_chronos = Chronos()
>>> my_chronos.fit(my_df)
Employing Maximum A Posteriori
100.0% - ELBO loss: -2.4531 | Mean Absolute Error: 0.2296
>>> predictions = my_chronos.predict(period=31)
Prediction no: 1000
>>> chronos_plotting.plot_components(predictions, my_chronos)
... plot appears
'''
def __init__(self,
method="MAP",
n_changepoints = 20,
year_seasonality_order=10,
month_seasonality_order=5,
weekly_seasonality_order=3,
learning_rate=0.01,
changepoint_range = 0.8,
changepoint_prior_scale = 0.05,
distribution = "Normal",
seasonality_mode = "add",
max_iter=1000):
'''
The initialization function. See class docstring for an in-depth explanation
of all parameters
'''
self.__method = self.__check_is_string_supported(method,
"method",
chronos_utils.SUPPORTED_METHODS)
self.__seasonality_mode = self.__check_is_string_supported(seasonality_mode,
"seasonality_mode",
["add", "mul"])
self.__y_likelihood_distribution = self.__check_is_string_supported(distribution,
"distribution",
chronos_utils.SUPPORTED_DISTRIBUTIONS)
self.__number_of_changepoints = self.__check_is_supported_integer(n_changepoints,
"n_changepoints",
positive=False)
self.__year_seasonality_fourier_order = self.__check_is_supported_integer(year_seasonality_order,
"year_seasonality_order",
positive=False)
self.__weekly_seasonality_fourier_order = self.__check_is_supported_integer(weekly_seasonality_order,
"weekly_seasonality_order",
positive=False)
self.__month_seasonality_fourier_order = self.__check_is_supported_integer(month_seasonality_order,
"month_seasonality_order",
False)
self.__number_of_iterations = self.__check_is_supported_integer(max_iter,
"max_iter",
positive=True)
self.__learning_rate = self.__check_is_supported_float(learning_rate,
"learning_rate",
positive=True)
self.__changepoint_prior_scale = self.__check_is_supported_float(changepoint_prior_scale,
"changepoint_prior_scale",
positive=True)
self.__proportion_of_data_subject_to_changepoints = self.__check_is_supported_float(changepoint_range,
"changepoint_range",
positive=False)
if (self.__proportion_of_data_subject_to_changepoints > 1.0):
raise ValueError("changepoint_range must be less than, or equal to, 1.0")
self.__y_max = None
self.__history_min_time_seconds = None
self.__history_max_time_seconds = None
self.__prediction_verbose = False
self.__multiplicative_seasonalities = []
self.__multiplicative_additional_regressors = []
self.__additive_seasonalities = []
self.__additive_additional_regressors = []
self.__reserved_names = [f"trend{suffix}" for suffix in ["", "_upper", "_lower"]]
self.__reserved_names.extend([f"yhat{suffix}" for suffix in ["", "_upper", "_lower"]])
self.__add_default_seasonalities()
# Some distributions will require extra work to ensure the
# parameters fed in are positive
if (self.__y_likelihood_distribution in chronos_utils.POSITIVE_DISTRIBUTIONS):
self.__make_likelihood_mean_positive = True
else:
self.__make_likelihood_mean_positive = False
########################################################################################################################
def __check_is_string_supported(self, variable, variable_name, options):
'''
Checks if the string supplied is one of the options, and throws
a value error if it is not
Parameters:
------------
variable - [str] A string which is the value
variable_name - [str] A string which is the variable name. Is included
for output purposes
options - [list] A list of strings which contains the available
options for the given variable
Returns:
------------
variable - [str] The inputted variable, unmodified, if no errors
                        occurred
'''
if (not isinstance(variable, str)):
raise TypeError(f"A value of {variable} for {variable_name} is not supported. Supported values are: {options}")
if (variable not in options):
raise ValueError(f"A value of {variable} for {variable_name} is not supported. Supported values are: {options}")
else:
return variable
########################################################################################################################
def __check_is_supported_float(self, variable, variable_name, positive=True):
'''
Checks if the float provided is supported, and throws an error if it
is not
Parameters:
------------
variable - [float] An integer which is the value
variable_name - [str] A string which is the variable name. Is included
for output purposes
positive - [bool] A flag denoting if only positive values are
supported or all non-negatives are
Returns:
------------
variable - [float] The inputted variable, unmodified, if no errors
                        occurred
'''
if (positive == True):
error_message = f"{variable_name} must be a positive float"
else:
error_message = f"{variable_name} must be a non-negative float"
if (not isinstance(variable, float)):
if (not isinstance(variable, int)):
raise TypeError(error_message)
elif (isinstance(variable, bool)):
raise TypeError(error_message)
elif (positive == True):
if (variable <= 0.0):
raise ValueError(error_message)
elif (positive == False):
if (variable < 0.0):
raise ValueError(error_message)
return variable
########################################################################################################################
def __check_is_supported_integer(self, variable, variable_name, positive=True):
'''
Checks if the integer provided is supported, and throws an error if it
is not
Parameters:
------------
variable - [int] An integer which is the value
variable_name - [str] A string which is the variable name. Is included
for output purposes
positive - [bool] A flag denoting if only positive values are
supported or all non-negatives are
Returns:
------------
variable - [int] The inputted variable, unmodified, if no errors
                        occurred
'''
if (positive == True):
error_message = f"{variable_name} must be a positive integer"
else:
error_message = f"{variable_name} must be a non-negative integer"
if (not isinstance(variable, int)):
raise TypeError(error_message)
elif (isinstance(variable, bool)):
raise TypeError(error_message)
elif (positive == True):
if (variable <= 0):
raise ValueError(error_message)
elif (positive == False):
if (variable < 0):
raise ValueError(error_message)
return variable
########################################################################################################################
def __add_default_seasonalities(self):
'''
A function to add the built-in default seasonalities
Parameters:
------------
None
Returns:
------------
None
'''
self.add_seasonality("yearly",
self.__year_seasonality_fourier_order,
self.__yearly_cycle_extraction_function,
self.__seasonality_mode)
self.add_seasonality("monthly",
self.__month_seasonality_fourier_order,
self.__monthly_cycle_extraction_function,
self.__seasonality_mode)
self.add_seasonality("weekly",
self.__weekly_seasonality_fourier_order,
self.__weekly_cycle_extraction_function,
self.__seasonality_mode)
########################################################################################################################
def __is_regressor_name_available(self, regressor_name):
'''
A function which checks to see if the regressor name is available.
Parameters:
------------
regressor_name - [str] A string which is the regressor name
Returns:
------------
is_available - [bool] True if the name is available,
False otherwise
'''
if (regressor_name in self.__additive_additional_regressors) or \
(regressor_name in self.__multiplicative_additional_regressors) or \
(regressor_name in self.__reserved_names):
return False
else:
return True
########################################################################################################################
def add_regressors(self, regressor_name, regressor_method="add"):
'''
A function which adds the name of a regressor that should
be considered when fitting the model. The regressor can be
either additive or multiplicative
Parameters:
------------
regressor_name - [str] A string denoting the name of
the regressor. Cannot be one of the
names 'yhat', 'yhat_upper', 'yhat_lower',
'trend', 'trend_upper', or 'trend_lower'.
Also cannot be the name of a previously
added regressor, i.e. two regressors
cannot have the same name
regressor_method - [str] either additive "add" or multiplicative "mul".
Specifies the mode of regressor incorporation
Returns:
------------
None
'''
# First check the name is available
if (not self.__is_regressor_name_available(regressor_name)):
raise ValueError(f"Name {regressor_name} is already in use")
# Now add it to the appropriate bucket
if (regressor_method == "add"):
self.__additive_additional_regressors.append(regressor_name)
elif (regressor_method == "mul"):
self.__multiplicative_additional_regressors.append(regressor_name)
else:
raise ValueError(f"method {regressor_method} is not supported, supported methods are 'add' or 'mul'")
########################################################################################################################
def __weekly_cycle_extraction_function(self, date_column):
'''
A function which extracts the weekly cycle from the date column provided
to it.
Parameters:
------------
date_column - [pd.Series] A pandas series of date type which supports
datetime functions.
Returns:
------------
normalized_data - [pd.Series] A pandas series with values in the half
open interval [0, 1.0) where 1.0 would correspond
                            to a full cycle, and each value corresponds to the
position in the cycle.
'''
day_of_week = date_column.dt.dayofweek # days already start at 0
if (self.__trained_on_weekend):
normalized_data = day_of_week/7 # normalize data to be between 0.0 and 1.0
else:
normalized_data = day_of_week/5 # normalize data to be between 0.0 and 1.0
return normalized_data
########################################################################################################################
def __monthly_cycle_extraction_function(self, date_column):
'''
A function which extracts the monthly cycle from the date column provided
to it.
Parameters:
------------
date_column - [pd.Series] A pandas series of date type which supports
datetime functions.
Returns:
------------
normalized_data - [pd.Series] A pandas series with values in the half
open interval [0, 1.0) where 1.0 would correspond
                            to a full cycle, and each value corresponds to the
position in the cycle.
'''
day_of_month = date_column.dt.day - 1 # make days start at 0
normalized_data = day_of_month/31 # normalize data to be between 0.0 and 1.0
return normalized_data
########################################################################################################################
def __yearly_cycle_extraction_function(self, date_column):
'''
A function which extracts the yearly cycle from the date column provided
to it.
Parameters:
------------
date_column - [pd.Series] A pandas series of date type which supports
datetime functions.
Returns:
------------
normalized_data - [pd.Series] A pandas series with values in the half
open interval [0, 1.0) where 1.0 would correspond
                            to a full cycle, and each value corresponds to the
position in the cycle.
'''
day_of_year = date_column.dt.dayofyear - 1 # make days start at 0
normalized_data = day_of_year/366 # normalize data to be between 0.0 and 1.0
return normalized_data
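    # Worked example of the cycle extraction convention used by the three functions
    # above: the returned values lie in the half-open interval [0.0, 1.0), with 0.0
    # marking the start of a cycle. For the yearly extractor:
    #
    # >>> dates = pd.Series(pd.to_datetime(["2020-01-01", "2020-07-01", "2020-12-31"]))
    # >>> # dayofyear - 1 gives 0, 182 and 365, so dividing by 366 yields cycle
    # >>> # positions of roughly 0.0, 0.497 and 0.997 respectively.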
########################################################################################################################
def add_seasonality(self, seasonality_name, fourier_order, cycle_extraction_function, seasonality_mode="add"):
'''
A function to add a requested seasonality to the inference process. The seasonality is
added to a list of seasonalities to construct, but is not constructed here. It is
constructed during the .fit method.
Parameters:
------------
seasonality_name - [str] The name of the seasonality. Must be
a unique name and cannot be one of the
reserved names. e.g. 'yearly', 'monthly',
or 'daily'.
fourier_order - [int] The fourier order of the seasonality.
Must be a positive integer. Higher values allow
for better fitting, but might also overfit.
cycle_extraction_function - [callable] A function which implements cycle
extraction from a date column. It should
accept a pandas series of datetime and
return a pandas series of the same size with
values from the half open interval [0.0, 1.0)
seasonality_mode - [str] The mode of this seasonality. Must be
either "add" or "mul".
Returns:
------------
None
'''
if (not self.__is_regressor_name_available(seasonality_name)):
raise ValueError(f"Name {seasonality_name} is already in use")
seasonality_information = {"name": seasonality_name,
"order": fourier_order,
"extraction_function":cycle_extraction_function}
if (seasonality_mode == "add"):
self.__additive_seasonalities.append(seasonality_information)
elif (seasonality_mode == "mul"):
self.__multiplicative_seasonalities.append(seasonality_information)
else:
raise ValueError(f"Seasonality mode {seasonality_mode} is unsupported. Must be one of 'add' or 'mul'")
self.__reserved_names.append(seasonality_name)
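    # A hedged sketch of registering a custom seasonality; the "daily" name and the
    # extraction function below are illustrative, not one of the built-in defaults:
    #
    # >>> def daily_cycle(date_column):
    # ...     # position within the day, in [0.0, 1.0)
    # ...     seconds = (date_column.dt.hour * 3600 +
    # ...                date_column.dt.minute * 60 +
    # ...                date_column.dt.second)
    # ...     return seconds / 86400
    # >>> my_chronos = Chronos()
    # >>> my_chronos.add_seasonality("daily",
    # ...                            fourier_order=4,
    # ...                            cycle_extraction_function=daily_cycle,
    # ...                            seasonality_mode="add")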
########################################################################################################################
def __transform_data(self, data):
'''
A function which takes the raw data containing the timestamp and
target column, and returns tensors for the trend, seasonality, and additional
components
Parameters:
------------
data - [DataFrame] The dataframe with the raw data.
Must contain at least one column with the
timestamp (with dtype np.datetime64). It
can optionally also contain the
target column
Returns:
------------
X_time - [tensor] A tensor of shape (n_samples, ),
where n_samples is the number of samples in
data
X_dataframe - [pd.DataFrame] - A pandas dataframe of all
columns except the target column if it is present
y - [tensor] A tensor of shape (n_samples, ),
where n_samples is the number of samples in
data, or None if there is no target column in
the original data
'''
# make a copy to avoid side effects of changing the original df
internal_data = data.copy()
for regressor_list in [self.__additive_additional_regressors, self.__multiplicative_additional_regressors]:
for regressor_name in regressor_list:
if regressor_name not in internal_data.columns.values:
raise KeyError(f"Could not find regressor '{regressor_name}' in data provided")
if (self.__target_col in internal_data.columns):
X_dataframe = internal_data.drop(self.__target_col, axis=1)
else:
X_dataframe = internal_data
        # Convert nanosecond timestamps to seconds since the epoch
internal_data[self.__time_col] = internal_data[self.__time_col].values.astype(float)/1e9
if (self.__history_min_time_seconds is None):
self.__history_min_time_seconds = internal_data[self.__time_col].min()
self.__history_max_time_seconds = internal_data[self.__time_col].max()
# Make time column go from 0 to 1
internal_data[self.__time_col] = internal_data[self.__time_col] - self.__history_min_time_seconds
internal_data[self.__time_col] = internal_data[self.__time_col]/(self.__history_max_time_seconds - self.__history_min_time_seconds)
X_time = torch.tensor(internal_data[self.__time_col].values, dtype=torch.float32)
# we only want to define y_max once
if (self.__y_max is None):
self.__y_max = internal_data[self.__target_col].max()
# If we don't have a target column (i.e. we're predicting), don't try and grab it
if (self.__target_col in internal_data.columns):
            # Poisson distribution requires counts, so we don't want to scale for it
if (self.__y_likelihood_distribution not in [chronos_utils.Poisson_dist_code]):
y_values = internal_data[self.__target_col].values/self.__y_max
else:
y_values = internal_data[self.__target_col].values
y = torch.tensor(y_values, dtype=torch.float32)
else:
y = None
return X_time, X_dataframe, y
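    # Worked example of the scaling performed above: if the history spans t_min to
    # t_max (in seconds since the epoch), every timestamp t is mapped to
    # (t - t_min) / (t_max - t_min), so the first observation becomes 0.0, the last
    # becomes 1.0, and timestamps supplied later for prediction map to values greater
    # than 1.0. Unless a Poisson likelihood is used, y is likewise divided by its
    # training maximum so the largest training observation becomes 1.0.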
########################################################################################################################
def __find_changepoint_positions(self, X_time, changepoint_num, changepoint_range, min_value = None, drop_first = True):
'''
        A function which takes a tensor of the time, expressed in seconds, and the
        number of changepoints to find, and finds the desired number of changepoints.
Parameters:
------------
X_time - [tensor] A tensor of the time, expressed in seconds. The
seconds need not be consecutive, or evenly spaced.
changepoint_num - [int] The number of changepoints to find
changepoint_range - [float] The range of the available times to consider. A
value between 0.0 and 1.0. 0.8 means only the first 80%
of the range is considered
min_value - [int] The timepoint which describes the beginning of the
range where changepoints can be found. Default is None,
which means the first measurement sets the beginning of
the range
drop_first - [bool] Whether to drop the first measurement found.
                            When True, this prevents the first measurement from
being considered as a changepoint (we don't want the
first second to be a changepoint usually)
Returns:
------------
changepoints - [tensor] A tensor of shape (changepoint_num, ) where each
entry is a day where a changepoint can happen. The
changepoints are chosen to be evenly spaced based on the
DATE RANGE, not the number of samples, in case samples
are unevenly spaced.
'''
# Set the minimum value in case it is None
if (min_value is None):
min_value = X_time.min().item()
# Find the maximum value available in the data
max_value_in_data = X_time.max().item()
# We usually don't want to consider the entire range, so we only
# consider a certain section, dictated by changepoint_range
max_distance = (max_value_in_data - min_value) * changepoint_range
max_value = min_value + max_distance
# When fitting, we don't want the first day to be a changepoint candidate
# However, when predicting the future, it is very much possible our first
# prediction day is a changepoint
if (drop_first):
changepoints = np.linspace(min_value, max_value, changepoint_num+1, dtype=np.float32)
changepoints = changepoints[1:] # The first entry will always be 0, but we don't
# want a changepoint right in the beginning
else:
changepoints = np.linspace(min_value, max_value, changepoint_num, dtype=np.float32)
changepoints = torch.tensor(changepoints, dtype=torch.float32)
return changepoints
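    # Worked example: with min_value=0.0, a maximum time of 1.0, changepoint_range=0.8
    # and changepoint_num=4, max_value is 0.8 and np.linspace(0.0, 0.8, 5) gives
    # [0.0, 0.2, 0.4, 0.6, 0.8]; with drop_first=True the first entry is removed,
    # leaving the changepoint positions [0.2, 0.4, 0.6, 0.8].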
########################################################################################################################
def __make_A_matrix(self, X_time, changepoints):
'''
A function which takes in the time tensor, and the changepoints
chosen, and produces a matrix A which specifies when to add the
effect of the changepoints
Parameters:
------------
X_time - [tensor] A tensor of the time, in seconds
changepoints - [tensor] A tensor of changepoints where each element
is a second when a changepoint can happen
Returns:
------------
A - [tensor] A tensor of shape (n_samples, S), where n_samples
is the number of samples in X_time, and S
is the number of changepoints
'''
A = torch.zeros((X_time.shape[0], len(changepoints)))
# For each row t and column j,
# A(t, j) = 1 if X_time[t] >= changepoints[j]. i.e. if the current time
# denoted by that row is greater or equal to the time of the most recent
# changepoint
for j in range(A.shape[1]):
row_mask = (X_time >= changepoints[j])
A[row_mask, j] = 1.0
return A
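    # Worked example: for X_time = [0.1, 0.3, 0.5, 0.9] and changepoints = [0.2, 0.6]
    # the matrix built above is
    #
    #     A = [[0., 0.],
    #          [1., 0.],
    #          [1., 0.],
    #          [1., 1.]]
    #
    # i.e. row t "switches on" column j once X_time[t] >= changepoints[j].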
########################################################################################################################
def __check_incoming_data_for_nulls(self, data, predictions=False):
'''
Checks incoming data for null values and advises the user what to
do
Parameters:
------------
data - [pd.DataFrame] - A pandas dataframe which contains
the incoming data
predictions - [bool] A flag which determines if we are in prediction
mode. If we are, we don't care about the target column
containing null values since we won't be using it
Returns:
------------
None
'''
        # The target column only matters when fitting; the time column and any
        # additional regressor columns are always checked
        if (predictions == False):
            if (data[self.__target_col].isna().sum() > 0):
                raise ValueError(f"{self.__target_col} contains null values, which Chronos cannot process. Consider either removing the null values, or setting them as 0, whichever makes more sense for your data")
        if (data[self.__time_col].isna().sum() > 0):
            raise ValueError(f"{self.__time_col} contains null values, which Chronos cannot process. Consider removing these values")
        for col in data.columns:
            if (col not in [self.__time_col, self.__target_col]):
                if (data[col].isna().sum() > 0):
                    raise ValueError(f"{col} contains null values, which Chronos cannot process. Consider removing these values")
########################################################################################################################
def fit(self, data, time_col = "ds", target_col="y"):
'''
A function which performs fitting of the required method on the data provided,
and thus estimates the parameters of this model.
Parameters:
------------
data - [DataFrame] A pandas dataframe with at least two columns. One
specifying the timestamp, and one specifying the target value
(the time series observations). The default expected column
names are 'ds' and 'y' but can be set to other names.
time_col - [str] A string denoting the name of the timestamp column.
Default is 'ds'
target_col - [str] A string denoting the name of the time series
observation column.
Default is 'y'
Returns:
------------
self - [Chronos] A fitted Chronos model
'''
# Record the time-series named columns. We will use them a lot
self.__time_col = time_col
self.__target_col = target_col
self.__check_incoming_data_for_nulls(data, predictions=False)
# Make a copy of the history
self.history = data.copy()
if (self.history[self.__time_col].dt.day_name().isin(["Sunday", "Saturday"]).any() == False):
print("No weekends found in training data, will only consider Monday - Friday")
self.__trained_on_weekend = False
else:
self.__trained_on_weekend = True
X_time, X_dataframe, y = self.__transform_data(data)
number_of_valid_changepoints = int(X_time.shape[0] * self.__proportion_of_data_subject_to_changepoints)
if (number_of_valid_changepoints < self.__number_of_changepoints):
warnings.warn(f"Number of datapoints in range, {number_of_valid_changepoints}, is smaller than number of changepoints, {self.__number_of_changepoints}. Using {number_of_valid_changepoints} instead", RuntimeWarning)
self.__number_of_changepoints = number_of_valid_changepoints
# Compute the changepoint frequency in changepoints/seconds
self.__changepoint_frequency = self.__number_of_changepoints/(self.__history_max_time_seconds - self.__history_min_time_seconds)
# Find a set of evenly spaced changepoints in the training data, and
        # build a matrix describing the effect of the changepoints on each timepoint
self.__changepoints = self.__find_changepoint_positions(X_time,
self.__number_of_changepoints,
self.__proportion_of_data_subject_to_changepoints)
self.__trend_components = {}
self.__multiplicative_components = {}
self.__additive_components = {}
if (self.__method in ["MLE", "MAP"]): # Point estimate methods
if (self.__method == "MLE"):
print("Employing Maximum Likelihood Estimation")
self.__model = self.__model_function
self.__guide = self.__guide_MLE
self.__param_prefix = ""
elif (self.__method == "MAP"):
print("Employing Maximum A Posteriori")
self.__model = self.__model_function
self.__guide = AutoDelta(self.__model, init_loc_fn=init_to_feasible)
self.__param_prefix = "AutoDelta."
self.__train_point_estimate(X_time,
X_dataframe,
y)
elif (self.__method == "MCMC"):
print("Employing Markov Chain Monte Carlo")
raise NotImplementedError("Did not implement MCMC methods")
return self
########################################################################################################################
def __train_point_estimate(self, X_time, X_dataframe, y):
'''
A function which takes in the model and guide to use for
the training of point estimates of the parameters, as well as
the regressor tensors, the changepoint matrix, and the target,
and performs optimization on the model parameters
Parameters:
------------
X_time - [tensor] The time tensor specifying the
time regressor
X_dataframe - [pd.DataFrame] A pandas dataframe containing all
columns except for the target. This dataframe is
included if additional regressors are requested
y - [tensor] The target to predict, i.e. the time
series measurements
Returns:
------------
None
'''
# Make sure we are working with a fresh param store
# TODO: see if there is a way to protect this
pyro.clear_param_store()
# Use a decaying optimizer which starts with a given learning
# rate, but then slowly drops it to take smaller and smaller
# steps
optimizer = Rprop
scheduler = pyro.optim.ExponentialLR({'optimizer': optimizer,
'optim_args': {'lr': self.__learning_rate},
'gamma': 0.9})
# Use the ELBO (evidence lower bound) loss function
        # and Stochastic Variational Inference optimization
# technique
my_loss = Trace_ELBO()
self.svi_ = SVI(self.__model,
self.__guide,
scheduler,
loss=my_loss)
# Calculate when to print output
print_interval = max(self.__number_of_iterations//10000, 10)
# Keep track of this for MAE metric
y_true = y.detach().numpy().copy()
if (self.__y_max is not None):
y_true *= self.__y_max
# Create a predictive object to predict for us for
# metric reporting purposes
predictive = Predictive(model=self.__model,
guide=self.__guide,
num_samples=1,
return_sites=("_RETURN",))
# Iterate through the optimization
for step in range(self.__number_of_iterations):
loss = self.svi_.step(X_time,
X_dataframe,
y)
# After calculating the loss, normalize by the
# number of points
loss = round(loss/y.shape[0], 4)
# If required, print out the results
if (step % print_interval == 0):
pct_done = round(100*(step+1)/self.__number_of_iterations, 2)
# If we're reporting, grab samples for the predictions
samples = predictive(X_time,
X_dataframe)
y_pred = samples["_RETURN"].detach().numpy()[0]
if (self.__y_max is not None):
y_pred *= self.__y_max
# Calculate mean absolute error and format it nicely
mean_absolute_error_loss = "{:.4f}".format(np.mean(np.abs(y_true - y_pred)))
print(" "*100, end="\r")
print(f"{pct_done}% - ELBO loss: {loss} | Mean Absolute Error: {mean_absolute_error_loss}", end="\r")
# Always have a final printout
pct_done = 100.0
print(" "*100, end="\r")
print(f"{pct_done}% - ELBO loss: {loss} | Mean Absolute Error: {mean_absolute_error_loss}")
########################################################################################################################
def __add_future_changepoints(self, past_deltas, future_trend_period):
'''
        A function which accepts the past changepoint rate adjustments and the length
        of the future period, and determines how many additional changepoints to
        simulate. If new changepoints are needed, their rate adjustments are appended
        to deltas; otherwise deltas is unchanged.
The additions to deltas are randomly drawn from a Laplace distribution since
they are simulations of potential future changepoints, and thus are not fixed.
Each run of this function is designed to be a single possible future.
Parameters:
------------
past_deltas - [tensor] A 1D tensor specifying the increase, or
decrease, in slope at each changepoint. The size is
(S, ) where S is the number of changepoints
future_trend_period - [int] The duration of the future trend, in seconds.
This is the number of seconds the future trend spans,
not the number of observations (for example, there
can be two observations, 15 seconds apart, so the
period will be 15 seconds)
Returns:
------------
deltas - [tensor] A new 1D tensor which contains the increase,
or decrease, in slope for both past and future
changepoints.
'''
# Find the number of future changepoints in this simulation
extra_changepoint_num = np.random.binomial(n=future_trend_period, p = self.__changepoint_frequency)
# Infer future changepoint scale
future_laplace_scale = torch.abs(past_deltas).mean()
if (future_laplace_scale > 0.0):
changepoint_dist = torch.distributions.Laplace(0, future_laplace_scale)
# The future changepoints can be any value from the
# inferred Laplace distribution
future_deltas = changepoint_dist.sample((extra_changepoint_num,))
else:
future_deltas = torch.zeros(extra_changepoint_num)
# Combine the past change rates as
# well as future ones
deltas = torch.cat([past_deltas, future_deltas])
return deltas
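    # Illustrative numbers (assumed, not from the source): if 20 changepoints were
    # fitted over roughly two years of history (changepoint_frequency of about
    # 20 / 63,072,000 changepoints per second) and the future period spans 30 days
    # (2,592,000 seconds), the binomial draw above yields on average about 0.8 extra
    # changepoints per simulation, each with a slope change drawn from
    # Laplace(0, mean |past delta|).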
########################################################################################################################
def __simulate_potential_future(self, X_time, past_deltas):
'''
A function which simulates a potential future to account for future
changepoints over X_time. The future may or may not contain changepoints
and so a single run of this function simulates a potential future
where additional changepoints are added.
X_time can contain the time tensor for both past and future
Parameters:
------------
X_time - [tensor] The time tensor specifying the time regressor
past_deltas - [tensor] A 1D tensor specifying the increase, or decrease,
in slope at each changepoint. The size is (S, ) where S
is the number of changepoints in the past, and not for
the entire duration of X_time
Returns:
------------
A tuple of (deltas, combined_changepoints, A)
deltas - [tensor] A 1D tensor of the rate adjustments for the
entire time of X_time.
        future_changepoints - [tensor] A 1D tensor specifying the times where each
changepoint occurs. May be the same size as
past_changepoints if no new changepoints have been
simulated
'''
        # Simulate potential changepoint generation. We've scaled the history so the last timestamp
# is 1.0, so we need to find, proportionally, how much the future period is
# bigger
future_raw_value = (X_time.max() - 1.0)
future_seconds_number = int(future_raw_value * (self.__history_max_time_seconds - self.__history_min_time_seconds))
deltas = self.__add_future_changepoints(past_deltas, future_seconds_number)
# Count the number of future changepoints simulated
future_changepoint_number = int(deltas.shape[0] - self.__number_of_changepoints)
# If we need to simulate a certain number of future changepoints,
# we will randomly draw their positions and create a new A
# matrix to be used to correct the trend.
# Otherwise, we can continue as usual
if (future_changepoint_number > 0):
            # The simulated future changepoints start at 1.0 (the end of the scaled
            # history). torch.rand only draws values in [0.0, 1.0), so we rescale
            # those draws into the desired future range below
first_future_trend_value = 1.0#X_time[-future_changepoint_number].item()
last_future_trend_value = X_time[-1].item()
# Make some random values
random_values = torch.rand(size = (future_changepoint_number, )).type(torch.float32)
# Employ inverse scaling to get the values from 0.0 - 1.0 to first_future_trend_value - last_future_trend_value
future_changepoints = random_values * (last_future_trend_value - first_future_trend_value) + first_future_trend_value
else:
future_changepoints = torch.tensor([])
return deltas, future_changepoints
########################################################################################################################
def __sample_initial_slope_and_intercept(self, method):
'''
A function which samples the initial values for the slope and
intercept. Depending on the method these could be distributions
or single parameters
Parameters:
------------
method - [str] A method describing which sampling method
to employ. If MAP, the samples are from a prior
distribution. If MLE, the samples are point parameters.
Returns:
------------
return_tuple - [tuple] A tuple of an intercept and slope;
intercept_init - [float] The value of the initial
intercept
slope_init - [float] The value of the initial
slope
'''
if (method == "MAP"):
            # Define parameters for the constant and slope
intercept_init = pyro.sample("intercept",
dist.Normal(0.0, 10.0))
slope_init = pyro.sample("trend_slope",
dist.Normal(0.0, 10.0))
elif (method == "MLE"):
intercept_init = pyro.param("intercept",
torch.tensor(0.0))
slope_init = pyro.param("trend_slope",
torch.tensor(0.0))
return intercept_init, slope_init
########################################################################################################################
def __sample_past_slope_changes(self, method):
'''
A function which samples the values for the slope changes.
Depending on the method these could be distributions
or single parameters
Parameters:
------------
method - [str] A method describing which sampling method
to employ. If MAP, the samples are from a prior
distribution. If MLE, the samples are point parameters.
Returns:
------------
past_deltas - [tensor] A tensor of the slope changes for the
trend component.
'''
if (method == "MAP"):
# sample from a Laplace distribution
means = torch.zeros(self.__number_of_changepoints)
scales = torch.full((self.__number_of_changepoints, ), self.__changepoint_prior_scale)
slope_change_laplace_distribution = dist.Laplace(means, scales).to_event(1)
past_deltas = pyro.sample("delta", slope_change_laplace_distribution)
elif (method == "MLE"):
past_deltas = pyro.param("delta",
torch.zeros(self.__number_of_changepoints))
return past_deltas
########################################################################################################################
def __sample_seasonalities_coefficients(self, method, seasonality_component, seasonality_name):
'''
A function which samples the values for the seasonality coefficients.
Depending on the method these could be distributions or single parameters
Parameters:
------------
method - [str] A method describing which sampling method
to employ. If MAP, the samples are from a prior
distribution. If MLE, the samples are point
parameters.
seasonality_component - [tensor] The matrix of seasonality fourier
sine-cosine pairs. This matrix has an even number
                                  of columns. Each column pair is a sine-cosine pair of
the seasonality fourier series
seasonality_name - [str] The name of the seasonality
Returns:
------------
betas_seasonality - [tensor] A tensor of the coefficients for the seasonality
components. For a single seasonality these are
coefficients for a pair of sine-cosine fourier terms
'''
if (method == "MAP"):
# sample from a normal distribution
means = torch.zeros(seasonality_component.size(1))
standard_deviations = torch.full((seasonality_component.size(1),), 10.0)
seasonality_coefficient_normal_distribution = dist.Normal(means, standard_deviations).to_event(1)
betas_seasonality = pyro.sample(f"betas_{seasonality_name}",
seasonality_coefficient_normal_distribution)
elif (method == "MLE"):
betas_seasonality = pyro.param(f"betas_{seasonality_name}",
torch.zeros(seasonality_component.size(1)))
return betas_seasonality
########################################################################################################################
def __sample_additional_regressors_coefficients(self, method, regressor_compoents, regressor_modes):
'''
A function which samples the values for the additional regressor
coefficients. Depending on the method these could be distributions
or single parameters
Parameters:
------------
method - [str] A method describing which sampling method
to employ. If MAP, the samples are from a prior
distribution. If MLE, the samples are point parameters
regressor_compoents - [tensor] A matrix of the additional regressors
for which coefficients are requested
regressor_modes - [str] The mode of regressor incorporation.
provided for naming purposes but should be
either "mul" or "add"
Returns:
------------
betas_regressors - [tensor] A tensor of the coefficients for the
additional regressor components.
'''
if (method == "MAP"):
means = torch.zeros(regressor_compoents.size(1))
standard_deviations = torch.full((regressor_compoents.size(1),), 10.0)
additional_regressor_normal_distribution = dist.Normal(means, standard_deviations).to_event(1)
# Compute coefficients for the regressors
betas_regressors = pyro.sample(f"betas_{regressor_modes}_regressors",
additional_regressor_normal_distribution)
elif (method == "MLE"):
betas_regressors = pyro.param(f"betas_{regressor_modes}_regressors",
torch.zeros(regressor_components.size(1)))
return betas_regressors
########################################################################################################################
########################################################################################################################
########################################################################################################################
def __guide_MLE(self, X_time, X_dataframe, y=None):
'''
A function which specifies a special guide which does nothing.
This guide is used in MLE optimization since there is no
relationship between parameters and prior distributions.
Parameters:
------------
X_time - [tensor] The time tensor specifying the time
regressor
X_dataframe - [pd.DataFrame] A pandas dataframe containing all
columns except for the target. This dataframe is
included if additional regressors are requested
y - [tensor] The target to predict, i.e. the time
series measurements. If None, the model
generates these observations instead.
Returns:
------------
None
'''
pass
########################################################################################################################
########################################################################################################################
def __predict_normal_likelihood(self, method, mu, y):
'''
A function which takes up the expected values and employs them
to specify a normal distribution, conditioned on the
observed data.
An additional sigma (standard deviation) value is registered as
either a distribution or a parameter based on the method used
Parameters:
------------
method - [str] Which method is used. Either MAP
or MLE
mu - [tensor] The expected values computed from
the model parameters
y - [tensor] The observed values
Returns:
------------
None
'''
# Define additional parameters specifying the likelihood
# distribution.
if (method == "MAP"):
sigma = pyro.sample("sigma", dist.HalfCauchy(1.0))
elif (method == "MLE"):
sigma = pyro.param("sigma",
torch.tensor(1.0),
constraint = constraints.positive)
# Finally sample from the likelihood distribution and
# optionally condition on the observed values.
# If y is None, this simply samples from the distribution
with pyro.plate("data", mu.size(0)):
pyro.sample("obs", dist.Normal(mu, sigma), obs=y)
########################################################################################################################
def __predict_studentT_likelihood(self, method, mu, y):
'''
A function which takes up the expected values and employs them
to specify a Student t-distribution, conditioned on the
observed data.
Additional sigma (standard deviation), and df (degrees of freedom)
values are registered as either distributions or parameters
based on the method used
Parameters:
------------
method - [str] Which method is used. Either MAP
or MLE
mu - [tensor] The expected values computed from
the model parameters
y - [tensor] The observed values
Returns:
------------
None
'''
# Define additional parameters specifying the likelihood
# distribution.
if (method == "MAP"):
sigma = pyro.sample("sigma", dist.HalfCauchy(1.0))
df = pyro.sample("df", dist.HalfCauchy(1.0))
elif (method == "MLE"):
sigma = pyro.param("sigma",
torch.tensor(1.0),
constraint = constraints.positive)
df = pyro.param("df",
torch.tensor(1.0),
constraint = constraints.positive)
# Finally sample from the likelihood distribution and
# optionally condition on the observed values.
# If y is None, this simply samples from the distribution
with pyro.plate("data", mu.size(0)):
pyro.sample("obs", dist.StudentT(df, mu, sigma), obs=y)
########################################################################################################################
def __predict_gamma_likelihood(self, method, mu, y):
'''
A function which takes up the expected values and employs them
to specify a gamma distribution, conditioned on the
observed data.
An additional rate value is registered as either a distribution
or a parameter based on the method used, and a shape tensor
is computed based on the rate and mu.
Parameters:
------------
method - [str] Which method is used. Either MAP
or MLE
mu - [tensor] The expected values computed from
the model parameters
y - [tensor] The observed values
Returns:
------------
None
'''
# Define additional parameters specifying the likelihood
# distribution.
if (method == "MAP"):
rate = pyro.sample("rate", dist.HalfCauchy(1.0)).clamp(min=torch.finfo(torch.float32).eps)
elif (method == "MLE"):
rate = pyro.param("rate",
torch.tensor(1.0),
constraint = constraints.positive)
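# With Gamma(concentration=shape, rate=rate) the mean is shape/rate,
# so setting shape = rate * mu makes the likelihood mean equal to mu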
shape = rate * mu
if (y is not None):
y_obs = y + torch.finfo(torch.float32).eps
else:
y_obs = y
# Finally sample from the likelihood distribution and
# optionally condition on the observed values.
# If y is None, this simply samples from the distribution
with pyro.plate("data", mu.size(0)):
pyro.sample("obs", dist.Gamma(concentration=shape, rate=rate), obs=y_obs)
########################################################################################################################
########################################################################################################################
def __predict_likelihood(self, method, distribution, mu, y):
'''
A function which takes up the trend, seasonalities, and additional regressors
and combines them to form the expected values, then conditions them
on the observed data based on the distribution requested
Parameters:
------------
method - [str] Which method is used
distribution - [str] Which distribution to use. Must be one
of the distributions specified in chronos_utils
mu - [tensor] The expected values computed from
the model parameters
y - [tensor] The observed values
Returns:
------------
None
'''
if (distribution == chronos_utils.Normal_dist_code):
self.__predict_normal_likelihood(method, mu, y)
elif (distribution == chronos_utils.StudentT_dist_code):
self.__predict_studentT_likelihood(method, mu, y)
elif (distribution == chronos_utils.Gamma_dist_code):
self.__predict_gamma_likelihood(method, mu, y)
########################################################################################################################
def __compute_changepoint_positions_and_values(self, X_time, y):
'''
A function which finds the position of changepoints in the time
frame provided, and samples their values. If an unobserved time
frame is provided (prediction), future changepoints are also
simulated.
Parameters:
------------
X_time - [tensor] The tensor of timesteps, in seconds
y - [tensor] The tensor of observed values. Or None if
we are in prediction mode
Returns:
------------
changepoint_tuple - [tuple] A tuple of:
deltas - [tensor] The changepoint values
combined_changepoints - [tensor] the changepoint positions
'''
past_deltas = self.__sample_past_slope_changes(self.__method)
# If no observations are given, we assume we are in
# prediction mode. Therefore, we have to generate possible scenarios
# for the future changepoints as a simulation
if (y is None):
deltas, future_changepoints = self.__simulate_potential_future(X_time, past_deltas)
combined_changepoints = torch.cat([self.__changepoints, future_changepoints])
else:
# If we are not in prediction mode, we only care about learning the past
deltas = past_deltas
combined_changepoints = self.__changepoints
return deltas, combined_changepoints
########################################################################################################################
def __compute_trend(self, X_time, y):
'''
A function which computes the trend component T(t). The function
computes the initial slope, intercept, and changepoints for the
time series and then combines them.
Parameters:
------------
X_time - [tensor] The tensor of timesteps, in seconds
y - [tensor] The tensor of observed values. Or None if
we are in prediction mode
Returns:
------------
trend - [tensor] The trend tensor which describes the growth
excluding seasonalities and additional regressors
'''
intercept_init, slope_init = self.__sample_initial_slope_and_intercept(self.__method)
deltas, combined_changepoints = self.__compute_changepoint_positions_and_values(X_time, y)
A = self.__make_A_matrix(X_time, combined_changepoints)
# To adjust the rates we also need to adjust the displacement during each rate change
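# (an intercept change of -delta_j * s_j at changepoint s_j keeps the piecewise-linear
# trend continuous when the slope changes by delta_j)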
intercept_adjustments = -deltas * combined_changepoints
# There is a unique slope value and intercept value for each
# timepoint to create a piece-wise function
slope = slope_init + torch.matmul(A, deltas)
intercept = intercept_init + torch.matmul(A, intercept_adjustments)
# Finally compute the trend component and record it in the global
# parameter store using the pyro.deterministic command
trend = slope * X_time + intercept
if (self.__make_likelihood_mean_positive == True):
trend = torch.nn.functional.softplus(trend, beta=100)
pyro.deterministic('trend', trend)
return trend
########################################################################################################################
def __create_seasonality_from_specs(self, X_date, seasonality_specs, seasonality_memoization_dictionary):
'''
A function which takes up a series of dates, and specs on how to construct
seasonality from the dates, and returns the constructed seasonality.
If the seasonality fourier terms have been calculated once they are
loaded from the seasonality_memoization_dictionary. Otherwise, they are
stored there after being computed
Parameters:
------------
X_date - [pd.Series] A pandas series of dates
as dtype pd.datetime64
seasonality_specs - [dict] A dictionary with the
seasonality specifications
seasonality_memoization_dictionary - [dict] A memoization dictionary which
stores the fourier components of the
seasonality after it is calculated
once
Returns:
------------
X_seasonality - [tensor] A tensor containing the
values of the computed seasonality.
The tensor is the same length as
the date series inputted.
'''
seasonality_name = seasonality_specs['name']
seasonality_order = seasonality_specs['order']
seasonality_extraction_function = seasonality_specs['extraction_function']
if (seasonality_name in seasonality_memoization_dictionary):
seasonality_tensor = seasonality_memoization_dictionary[seasonality_name]
else:
cycle = torch.tensor(2 * np.pi * seasonality_extraction_function(X_date).values)
seasonality_tensor = torch.empty(X_date.shape[0], seasonality_order*2)
index = 0
for f in range(seasonality_order):
fourier_term = (f+1) * cycle
seasonality_tensor[:, index] = np.sin(fourier_term)
seasonality_tensor[:, index+1] = np.cos(fourier_term)
index += 2
seasonality_memoization_dictionary[seasonality_name] = seasonality_tensor
betas_seasonality = self.__sample_seasonalities_coefficients(self.__method, seasonality_tensor, seasonality_name)
X_seasonality = seasonality_tensor.matmul(betas_seasonality)
return X_seasonality
########################################################################################################################
def __compute_multiplicative_seasonalities_product(self, X_date):
'''
A function which accepts a pandas date series and computes all
multiplicative seasonalities. Then combines the seasonalities
by computing their product.
e.g. if seasonalities s1, s2, and s3 are requested, the resulting
tensor is (s1 * s2 * s3).
The seasonalities have 1.0 added to them so that positive values
amplify the results, and negative values dampen the results.
Parameters:
------------
X_date - [pd.Series] A pandas series of dates as
dtype pd.datetime64
Returns:
------------
total_seasonalities_product - [tensor] A tensor of all multiplicative
seasonalities, multiplied together.
'''
total_seasonalities_product = torch.ones(X_date.shape[0], )
for multiplicative_seasonality in self.__multiplicative_seasonalities:
X_seasonality = self.__create_seasonality_from_specs(X_date,
multiplicative_seasonality,
self.__multiplicative_components)
X_seasonality = 1.0 + X_seasonality
total_seasonalities_product = total_seasonalities_product * X_seasonality
return total_seasonalities_product
########################################################################################################################
def __compute_multiplicative_regressors_product(self, X_dataframe):
'''
A function which accepts a pandas dataframe and computes all
multiplicative regressors' coefficients and effects.
Then combines the regressors by computing the individual
regressors' product.
e.g. if regressor effects r1, r2, and r3 are requested, the resulting
tensor is (r1 * r2 * r3).
The regressors have 1.0 added to them so that positive values
amplify the results, and negative values dampen the results.
Parameters:
------------
X_dataframe - [pd.DataFrame] A pandas dataframe which
contains the regressor values.
Returns:
------------
multiplicative_regressors_product - [tensor] A tensor of all multiplicative
regressors, multiplied together.
'''
for additional_regressor in self.__multiplicative_additional_regressors:
if (additional_regressor not in X_dataframe.columns):
raise KeyError(f"Regressor '{additional_regressor}' not found in provided dataframe")
if ("regressors" not in self.__multiplicative_components):
X_multiplicative_regressors = torch.tensor(X_dataframe[self.__multiplicative_additional_regressors].values)
self.__multiplicative_components["regressors"] = X_multiplicative_regressors
else:
X_multiplicative_regressors = self.__multiplicative_components["regressors"]
betas_multiplicative_regressors = self.__sample_additional_regressors_coefficients(self.__method,
X_multiplicative_regressors,
"mul")
# Notice we don't do a matrix-vector product here, but rather a
# row-wise multiplication. The reason is we want to then find a cumulative
# product of all multiplicative regressors.
# i.e. if I have regressors reg1, reg2, and reg3, I want to multiply the trend
# by (reg1 * reg2 * reg3) rather than by (reg1 + reg2 + reg3)
multiplicative_regressors = X_multiplicative_regressors * betas_multiplicative_regressors
# The multiplicative regressors have to be adjusted so that they are larger than 1
# when positive, and between 0.0 and 1.0 when negative so that their multiplicative
# effect either dampens, or amplifies, the results, but never flips the sign
multiplicative_regressors = (1 + multiplicative_regressors)
multiplicative_regressors_product = torch.prod(multiplicative_regressors, dim=1)
return multiplicative_regressors_product
########################################################################################################################
def __compute_multiplicative_component(self, X_dataframe):
'''
A function which looks at the data and computes all components that were
labeled as multiplicative. The function computes the product of all
multiplicative seasonalities, then the product of all multiplicative
regressors, and then produces their product.
Parameters:
------------
X_dataframe - [pd.DataFrame] A pandas dataframe which contains
all columns of the data except the target
Returns:
------------
multiplicative_component - [tensor] A tensor containing the data for the
multiplicative component of the model
'''
X_date = X_dataframe[self.__time_col]
multiplicative_seasonalities_product = self.__compute_multiplicative_seasonalities_product(X_date)
multiplicative_regressors_product = self.__compute_multiplicative_regressors_product(X_dataframe)
multiplicative_component = multiplicative_seasonalities_product * multiplicative_regressors_product
return multiplicative_component
########################################################################################################################
def __compute_additive_seasonalities_sum(self, X_date):
'''
A function which accepts a pandas date series and computes all
additive seasonalities. Then combines the seasonalities
by computing their sum.
e.g. if seasonalities s1, s2, and s3 are requested, the resulting
tensor is (s1 + s2 + s3).
Parameters:
------------
X_date - [pd.Series] A pandas series of dates as
dtype pd.datetime64
Returns:
------------
total_seasonalities_sum - [tensor] A tensor of all additive
seasonalities, summed together.
'''
total_seasonalities_sum = torch.zeros(X_date.shape[0], )
for additive_seasonality in self.__additive_seasonalities:
X_seasonality = self.__create_seasonality_from_specs(X_date,
additive_seasonality,
self.__additive_components)
total_seasonalities_sum = total_seasonalities_sum + X_seasonality
return total_seasonalities_sum
########################################################################################################################
def __compute_additive_regressors_sum(self, X_dataframe):
'''
A function which accepts a pandas dataframe and computes all
additive regressors' coefficients and effects.
Then combines the regressors by computing the individual
regressors' sum.
e.g. if regressor effects r1, r2, and r3 are requested, the resulting
tensor is (r1 + r2 + r3).
Parameters:
------------
X_dataframe - [pd.DataFrame] A pandas dataframe which
contains the regressor values.
Returns:
------------
additive_regressors_sum - [tensor] A tensor of all additive
regressors, summed together.
'''
for additional_regressor in self.__additive_additional_regressors:
if (additional_regressor not in X_dataframe.columns):
raise KeyError(f"Regressor '{additional_regressor}' not found in provided dataframe")
if ("regressors" not in self.__additive_components):
X_additive_regressors = torch.tensor(X_dataframe[self.__additive_additional_regressors].values, dtype=torch.float32)
self.__additive_components["regressors"] = X_additive_regressors
else:
X_additive_regressors = self.__additive_components["regressors"]
betas_additive_regressors = self.__sample_additional_regressors_coefficients(self.__method, X_additive_regressors, "add")
additive_regressors_sum = X_additive_regressors.matmul(betas_additive_regressors)
return additive_regressors_sum
########################################################################################################################
def __compute_additive_component(self, X_dataframe):
'''
A function which looks at the data and computes all components that were
labeled as additive. The function computes the sum of all
additive seasonalities, then the sum of all additive
regressors, and then produces their sum.
Parameters:
------------
X_dataframe - [pd.DataFrame] A pandas dataframe which contains
all columns of the data except the target
Returns:
------------
additive_component - [tensor] A tensor containing the data for the
additive component of the model
'''
X_date = X_dataframe[self.__time_col]
additive_seasonalities_sum = self.__compute_additive_seasonalities_sum(X_date)
additive_regressors_sum = self.__compute_additive_regressors_sum(X_dataframe)
additive_component = additive_seasonalities_sum + additive_regressors_sum
return additive_component
########################################################################################################################
def __model_function(self, X_time, X_dataframe, y=None):
'''
The major powerhouse function of this object, performs the modeling of
the generative process which generates y from X_dataframe and X_time.
Returns the expected values given the regressors and the learned
parameters.
Parameters:
------------
X_time - [tensor] A tensor of time values, normalized. If
we are training, this will contain values between 0.0
and 1.0. Otherwise, it will contain values normalized to
reflect the range of the training data (i.e. from 1.0 upward
if it contains only future times, and from 0.0 to 1.0 and
upward if it contains both past and future times).
X_dataframe - [pd.DataFrame] A pandas dataframe which contains the
raw data passed into this Chronos object. Used to
compute seasonality and additional regressors
y - [tensor] A tensor of the observed values, normalized to
the range 0.0 - 1.0, if we are training, or a None object
if we are predicting.
Returns:
------------
mu - [tensor] A tensor of the expected values given X_dataframe
and the learned parameters.
'''
prediction_mode = y is None
if (prediction_mode):
# Poor man's verbose printing of prediction number
if (self.__prediction_verbose == True):
self.predict_counter_ += 1
if (self.predict_counter_ > 0):
print(f"Prediction no: {self.predict_counter_}", end="\r")
trend = self.__compute_trend(X_time, y)
multiplicative_component = self.__compute_multiplicative_component(X_dataframe)
additive_component = self.__compute_additive_component(X_dataframe)
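# Combine the components: the multiplicative component scales the trend,
# while the additive component shifts it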
mu = (trend * multiplicative_component) + additive_component
if (self.__make_likelihood_mean_positive == True):
mu = torch.nn.functional.softplus(mu, beta=100)
mu = mu + torch.finfo(torch.float32).eps
# Sample observations based on the appropriate distribution
self.__predict_likelihood(self.__method,
self.__y_likelihood_distribution,
mu,
y)
return mu
########################################################################################################################
def predict(self,
future_df=None,
sample_number=1000,
ci_interval=0.95,
period=30,
frequency='D',
include_history=True,
verbose=True):
'''
A function which accepts a dataframe with at least one column, the timestamp
and employs the learned parameters to predict observations as well as
credibility intervals and uncertainty intervals.
Alternatively, the function can accept the parameters accepted by
.make_future_dataframe and produce the future dataframe internally.
Returns a dataframe with predictions for observations, upper and lower limits
for credibility intervals, trend, and upper and lower limits on trend
uncertainty.
Parameters:
------------
future_df - [DataFrame] The dataframe. Must at least have a single
column of timestamp with the same name as the training
dataframe. If data is not provided, period, frequency,
and include_history must be provided. If data is
provided, period, frequency, and include_history are
ignored
Default is None
sample_number - [int] The number of posterior samples to generate in
order to draw the uncertainty intervals and credibility
intervals. Larger values give more accurate results,
but also take longer to run.
Default 1000
ci_interval - [float] The credibility interval range to generate.
Must be between 0.0 and 1.0, 0.95 generates a range
such that 95% of all observations fall within this
range.
Default is 0.95.
period - [int] The number of future observations based on
frequency. The default is 30, and the default
for frequency is 'D', which means 30 days ahead.
Default is 30
frequency - [str] The frequency of the period. See
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
for a list of supported frequencies.
The default is 'D' which stands for calendar
day.
Default is 'D'
include_history - [bool] A boolean describing whether to include the
history observations used by the fit method.
Default is True
Returns:
------------
predictions - [DataFrame] A dataframe with:
[time_col] - The time column fed into this method
[target_col] - The name for the original target
column if history is included in the dataframe
yhat - The predicted value for observations
yhat_upper - The upper value for uncertainty
+ credibility interval
yhat_lower - The lower value for uncertainty
+ credibility interval
trend - The predicted value for the trend,
excluding seasonality
trend_upper - The upper value for trend uncertainty
trend_lower - The lower value for trend uncertainty
Seasonality is not returned in the dataframe, but is
incorporated when computing yhat.
'''
self.__prediction_verbose = verbose
# Make a future dataframe if one is not provided
if (future_df is None):
future_df = self.make_future_dataframe(period=period,
frequency=frequency,
include_history=include_history)
self.__check_incoming_data_for_nulls(future_df, predictions=True)
# Transform data into trend and seasonality as before
X_time, X_dataframe, y = self.__transform_data(future_df)
# For some reason the predictive method performs 2 extra runs
# so we need to set the counter to -2 to tell the predictive method
# when to start printing out output
self.predict_counter_ = -2
self.__trend_components = {}
self.__multiplicative_components = {}
self.__additive_components = {}
# For point estimates, use the predictive interface
if (self.__method in ["MAP", "MLE"]):
# https://pyro.ai/examples/bayesian_regression.html#Model-Evaluation
predictive = Predictive(model=self.__model,
guide=self.__guide,
num_samples=sample_number,
return_sites=("obs", "trend"))
samples = predictive(X_time,
X_dataframe)
# Calculate ntiles based on the CI provided. Each side should have
# CI/2 credibility
space_on_each_side = (1.0 - ci_interval)/2.0
lower_ntile = int(len(samples['obs']) * space_on_each_side)
upper_ntile = int(len(samples['obs']) * (1.0 - space_on_each_side))
# The resulting tensor has shape (sample_number, 1, n_samples), so squeeze out the middle dimension
trend_array = samples['trend'].squeeze()
# Calculate uncertainty
trend = trend_array.mean(dim=0)
trend_upper = trend_array.max(dim=0).values
trend_lower = trend_array.min(dim=0).values
# Build the output dataframe
predictions = pd.DataFrame({"yhat": torch.mean(samples['obs'], 0).detach().numpy(),
"yhat_lower": samples['obs'].kthvalue(lower_ntile, dim=0)[0].detach().numpy(),
"yhat_upper": samples['obs'].kthvalue(upper_ntile, dim=0)[0].detach().numpy(),
"trend": trend.detach().numpy(),
"trend_lower": trend_lower.detach().numpy(),
"trend_upper": trend_upper.detach().numpy()})
# Incorporate the original values, and build the column order to return
columns_to_return = []
columns_to_return.append(self.__time_col)
predictions[self.__time_col] = future_df[self.__time_col]
if (y is not None):
predictions[self.__target_col] = y.detach().numpy()
columns_to_return.append(self.__target_col)
columns_to_return.extend(['yhat', 'yhat_upper', 'yhat_lower',
'trend', 'trend_upper', 'trend_lower'])
predictions = predictions[columns_to_return]
numeric_columns = columns_to_return[1:]
if (self.__y_max is not None):
predictions[numeric_columns] *= self.__y_max
return predictions
else:
raise NotImplementedError(f"Did not implement .predict for {self.__method}")
########################################################################################################################
def make_future_dataframe(self, period=30, frequency="D", include_history=True):
'''
A function which takes in a future range specified by the period and the
frequency and returns a dataframe which can be used by the predict method.
By default, the history is included as well for easy diagnostics via the
plotting methods.
NOTE: You should only use this method if you plan on adding additional
custom regressors. If there are no custom regressors involved you can
use the .predict method directly
Parameters:
------------
period - [int] The number of future observations based on
frequency. The default is 30, and the default
for frequency is 'D', which means 30 days ahead.
Default is 30
frequency - [str] The frequency of the period. See
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases
for a list of supported frequencies.
The default is 'D' which stands for calendar
day.
Default is 'D'
include_history - [bool] A boolean describing whether to include the history
observations used by the fit method.
Default is True
Returns:
------------
future_df - [DataFrame] A dataframe with a datestamp column, and a
target column ready to be used by the
predict method. The datestamp and target
column names are the same as the ones
used in the fitting dataframe.
'''
# Find the highest timestamp observed, and build a new series of timestamps starting
# at that timestamp, then remove the first of that series. The resulting date range
# will begin one [frequency] ahead of the last datestamp in history (for example,
# one day after, or one hour after)
max_date_observed = self.history[self.__time_col].max()
date_range = pd.date_range(start=str(max_date_observed), periods=period+1, freq=frequency)
date_range = date_range[1:]
# Package everything into a dataframe
future_df = pd.DataFrame({self.__time_col: date_range,
self.__target_col: [np.nan] * date_range.shape[0]})
# Optionally add the history
if (include_history == True):
past_df = self.history.copy()
future_df = pd.concat([past_df, future_df], axis=0).reset_index(drop=True)
return future_df
########################################################################################################################
def __compute_seasonality(self, param_pairs, numeric_values, cycle_period):
'''
A function which accepts a tensor of coefficients in param pairs, the
time values and the cycle period, and returns a seasonal (cyclical)
tensor of the required seasonality using pairs of sin and cos
calculations.
Parameters:
------------
param_pairs - [tensor] A tensor of parameter pairs. The tensor should
always be of shape (N, 2) where N is the order
of the given seasonality.
numeric_values - [tensor] A tensor of the time values to be calculated.
This can be weekdays, which will range from 0-6,
month days which will range from 0-30, etc.
cycle_period - [int] The period of the cycle. For weekdays, the cycle
repeats every 7 days, so the period will be
7. For months, it will be 31. For years, 366, etc.
Returns:
------------
seasonality - [tensor] The seasonal component specified by
the input parameters. e.g. weekly seasonality,
or yearly seasonality.
'''
seasonality = np.zeros_like(numeric_values, dtype=np.float32)
# Go through each parameter pair and apply it to
# a pair of sin and cos functions defining the
# seasonality.
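# i.e. seasonality(t) = sum_k [ a_k * sin(2*pi*k*t/P) + b_k * cos(2*pi*k*t/P) ],
# where P is cycle_period and (a_k, b_k) is the k-th parameter pair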
for i, pair in enumerate(param_pairs):
cycle_order = i+1
sin_coef = pair[0]
cosin_coef = pair[1]
cycle_pos = cycle_order * 2 * np.pi * numeric_values/cycle_period
seasonality += (sin_coef * np.sin(cycle_pos)) + (cosin_coef * np.cos(cycle_pos))
return seasonality
########################################################################################################################
def get_seasonality(self, seasonality_name):
'''
A function which returns a dataframe denoting the seasonality requested. If the
fitting method used is not in [MAP, MLE], the function raises an error
Parameters:
------------
seasonality_name - [str] A string denoting the name of the seasonality
requested
Returns:
------------
seasonality - [DataFrame] A pandas dataframe of the requested
seasonality
'''
if (self.__method in ["MAP", "MLE"]):
if (seasonality_name == "weekly"):
seasonality = self.__get_weekly_seasonality_point(f'{self.__param_prefix}betas')
elif (seasonality_name == "monthly"):
seasonality = self.__get_monthly_seasonality_point(f'{self.__param_prefix}betas')
elif (seasonality_name == "yearly"):
seasonality = self.__get_yearly_seasonality_point(f'{self.__param_prefix}betas')
if (self.__seasonality_mode == "add"):
if (self.__y_max is not None):
seasonality['Y'] *= self.__y_max
elif (self.__seasonality_mode == "mul"):
seasonality['Y'] = 100*(1.0 + seasonality['Y'])
return seasonality
else:
raise NotImplementedError("Seasonality retrieval is only implemented for the MAP and MLE methods")
########################################################################################################################
def __get_seasonal_params(self, param_name):
'''
A function which accepts the name of the parameter store where
seasonality coefficients are stored, and the seasonality name,
and returns the coefficients corresponding to the requested
seasonality.
Parameters:
------------
param_name - [str] The name of the global param store where
the specific seasonality is stored.
Returns:
------------
seasonality_params - [tensor] A tensor of shape (N,2) where N
is the order of the requested
seasonality. This tensor contains
the seasonality coefficients.
'''
seasonal_params = []
for param in pyro.param(param_name):
seasonal_params.append(param.item())
# Reshape to have two columns. The parameters are assumed to be
# in order (sin_param1, cos_param1, sin_param2, cos_param2, ...)
seasonal_params = np.array(seasonal_params).reshape(-1, 2)
return seasonal_params
########################################################################################################################
def __get_weekly_seasonality_point(self, param_name):
'''
A function which accepts the name of the parameter where point estimates
of seasonalities are stored and returns a pandas dataframe containing
the data for the weekly seasonality as well as axis labels
Parameters:
------------
param_name - [str] The name of the pyro parameter store where the
point estimates are stored
Returns:
------------
weekly_seasonality - [DataFrame] A pandas dataframe containing three
columns:
X - The values for the weekly seasonality (0-6)
Label - The labels for the days ("Monday" - "Sunday")
Y - The seasonal response for each day
'''
# Get the parameter pairs of coefficients
weekly_params = self.__get_seasonal_params(param_name+"_weekly")
# Monday is assumed to be 0
weekdays_numeric = np.arange(0, 7, 1)
weekdays = chronos_utils.weekday_names_
if (self.__trained_on_weekend == False):
weekdays_numeric = weekdays_numeric[:-2]
weekdays = weekdays[:-2]
# Compute seasonal response
seasonality = self.__compute_seasonality(weekly_params, weekdays_numeric, weekdays_numeric.shape[0])
# Package everything nicely into a df
weekly_seasonality = pd.DataFrame({"X": weekdays_numeric,
"Label": weekdays,
"Y": seasonality})
return weekly_seasonality
########################################################################################################################
def __get_monthly_seasonality_point(self, param_name):
'''
A function which accepts the name of the parameter where point estimates
of seasonalities are stored and returns a pandas dataframe containing
the data for the monthly seasonality as well as axis labels
Parameters:
------------
param_name - [str] The name of the pyro parameter store where the
point estimates are stored
Returns:
------------
monthly_seasonality - [DataFrame] A pandas dataframe containing three
columns:
X - The values for the monthly seasonality (0-30)
Label - The labels for the days ("1st" - "31st")
Y - The seasonal response for each day
'''
# Get the parameter pairs of coefficients
monthly_params = self.__get_seasonal_params(param_name+"_monthly")
monthdays_numeric = np.arange(0, 31, 1)
monthday_names = chronos_utils.monthday_names_
# Compute seasonal response
seasonality = self.__compute_seasonality(monthly_params, monthdays_numeric, 31)
# Package everything nicely into a df
monthly_seasonality = pd.DataFrame({"X": monthdays_numeric,
"Label": monthday_names,
"Y": seasonality})
return monthly_seasonality
########################################################################################################################
def __get_yearly_seasonality_point(self, param_name):
'''
A function which accepts the name of the parameter where point estimates
of seasonalities are stored and returns a pandas dataframe containing
the data for the yearly seasonality as well as axis labels
Parameters:
------------
param_name - [str] The name of the pyro parameter store where the
point estimates are stored
Returns:
------------
yearly_seasonality - [DataFrame] A pandas dataframe containing three
columns:
X - The values for the yearly seasonality
days (0-366)
Label - The labels for the days (the individual dates)
Y - The seasonal response for each day
'''
# Get the parameter pairs of coefficients
yearly_params = self.__get_seasonal_params(param_name+"_yearly")
yeardays_numeric = np.arange(0, 366, 1)
yearly_dates = pd.date_range(start="01-01-2020", periods=366)
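# The remainder of this method is a sketch, reconstructed by analogy with
# __get_monthly_seasonality_point; the exact label formatting is an assumption
seasonality = self.__compute_seasonality(yearly_params, yeardays_numeric, 366)
yearly_seasonality = pd.DataFrame({"X": yeardays_numeric,
"Label": yearly_dates,
"Y": seasonality})
return yearly_seasonality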
import itertools
import re
import pandas as pd
import numpy as np
from catboost import Pool, FeaturesData
from constants import SCHOOLS_REVERSED, TARGET_LABELS
def _parse_str_nums(num_string):
"""
parse strings of numbers and take averages if there are multiple
:param num_string: a string of numbers and text
:type num_string: String
:return: float of the number found or average of multiple numbers found
:rtype: Float
:example:
>>> _parse_str_nums("40% to 50%")
>>> 45.
>>> _parse_str_nums("30%-50%")
>>> 40.
>>> _parse_str_nums("-20%")
>>> -20.
"""
num_string = num_string.upper().replace("ZERO", "0").replace("FORGET IT", "0")
# regex to find numbers
nums = re.findall(r'\d+', num_string)
# but if there's only one number, then we know it's NOT a range and thus we can look for negative numbers
if len(nums) == 1:
nums = re.findall(r'[+-]?\d+(?:\.\d+)?', num_string)
# cast strings to ints
nums = [int(n) for n in nums]
# average ints derived from string
averaged = np.average(np.asarray(nums))
return averaged
def _squash_nested_lists(l_of_l):
"""
compress list of lists into one single list
:param l_of_l: list of lists
:type l_of_l: List
:return: single list with all elements of list of list
:rtype: List
:example:
>>> _squash_nested_lists([['a','b'],['c'],['d','e']])
>>> ['a','b','c','d','e']
"""
return list(itertools.chain.from_iterable(l_of_l))
# TODO: do we care about case sensitivity?
def _preprocess_odds_string(string_of_odds):
"""
:param string_of_odds: string scraped from site describing an applicants odds of admittance
:type string_of_odds: String
:return: list of strings with entries for either schools or percent chances
:rtype: list
:example:
>>> _preprocess_odds_string("Harvard Business School: 85% Stanford: 80% Wharton: 90% Tuck: 95% Kellogg: 95%")
>>> ['Harvard Business School', '85', 'Stanford', '80', 'Wharton', '90', 'Tuck', '95', 'Kellogg', '95', '']
"""
# split on colons
divied_list_split_colon = string_of_odds.split(':')
# split on last occurrence of '%' using rsplit
divied_list_percent = [entry.rsplit('%', 1) for entry in divied_list_split_colon]
# recombine list of lists into one list of strings
divied_list_percent = _squash_nested_lists(divied_list_percent)
# split again on the last occurrence of new lines
# some snarky assessments have only text and no percent sign; i.e. "Forget it" or "Zero"
divied_list_of_lists = [entry.rsplit('\n', 1) for entry in divied_list_percent]
# recombine list of lists into one continuous list
compressed_divied_list = _squash_nested_lists(divied_list_of_lists)
# strip spaces for every entry
compressed_divied_list = [entry.strip() for entry in compressed_divied_list]
return compressed_divied_list
def _reduce_majors_dimensionality(data):
"""
The original dataset has a high number of majors specified.
The dimensionality of the expanded numeric representation probably
hurts the model performance (in theory).
Thus we are reducing the dimensionality by combining all the STEM majors into one
category and all the non-STEM majors into another category.
"""
stem_majors = ['Engineering', 'STEM']
# get all the majors that are not in the stem category
nonstem_majors = list(set(list(data.MAJOR.values)) - set(stem_majors))
majors_df = data.MAJOR
stem_replaced = majors_df.replace(to_replace=stem_majors, value=1.0)
new_majors_col = stem_replaced.replace(to_replace=nonstem_majors, value=0.0)
df_without_major_col = data.drop(['MAJOR'], axis=1, inplace=False)
reduced_df = df_without_major_col.join(pd.DataFrame({'STEM_MAJOR': new_majors_col}))
# print reduced_df
return reduced_df
def _reduce_race_dimensionality(data):
"""
The original dataset has a high number of races specified.
The dimensionality of the expanded numeric representation probably
hurts the model performance (in theory).
Thus we are reducing the dimensionality by combining all the underrepresented
races into one category and all the others into another.
"""
underrepresented = ['Black', 'Latinx', 'Native American']
# get all the non-under represented races
non_underrepresented = list(set(list(data.RACE.values)) - set(underrepresented))
races_df = data.RACE
replace_races = races_df.replace(to_replace=underrepresented, value=1.0)
race_column = replace_races.replace(to_replace=non_underrepresented, value=0.0)
df_without_race_col = data.drop(['RACE'], axis=1, inplace=False)
reduced_df = df_without_race_col.join(pd.DataFrame({'UNDER_REP': race_column}))
return reduced_df
def _reduced_university_dimensionality(data):
"""
Use only binary classification. Tier 1 University Yes / No
"""
name_brand_schools = ['Tier 1', 'Tier 2']
small_schools = ['Tier 3']
uni_df = data.UNIVERSITY
replace_uni = uni_df.replace(to_replace=name_brand_schools, value=1.0)
uni_column = replace_uni.replace(to_replace=small_schools, value=0.0)
df_without_uni_col = data.drop(['UNIVERSITY'], axis=1, inplace=False)
reduced_df = df_without_uni_col.join(pd.DataFrame({'NAME_BRAND_SCHOOL': uni_column}))
return reduced_df
def _reduce_gender_dimensionality(data):
"""
Use only binary classification for simplifying dimensions
"""
gen_df = data.GENDER
replace_gen = gen_df.replace(to_replace=['Female'], value=1.0)
gen_column = replace_gen.replace(to_replace=['MALE'], value=0.0)
df_without_gen_col = data.drop(['GENDER'], axis=1, inplace=False)
reduced_df = df_without_gen_col.join(pd.DataFrame({'FEMALE': gen_column}))
return reduced_df
def _drop_unused_and_expand_categorical_columns(data):
"""
Drop data columns that were unused or have mostly NaNs
Expand categorical datas so they can be represented numerically
"""
# drop unused columns
data_after_drop = data.drop(['ODDS', 'INTERNATIONAL', 'JOBTITLE', 'AGE'], axis=1, inplace=False)
# dropped_data = data.drop(['ODDS','INTERNATIONAL','JOBTITLE','UNIVERSITY','MAJOR','GENDER','RACE'],axis=1,inplace=False)
# #change categorical data into numeric
# categorical_cols = ['UNIVERSITY','MAJOR','GENDER','RACE']
# # categorical_cols = []
# df_processed = pd.get_dummies(data=data_after_drop,columns=categorical_cols)
return data_after_drop
def preprocess_data_4_catboost(data_df, output_path=None):
"""
preprocess data for working with gradient boosting techniques
specifically with the catboost library. since this is going to use
the preprocessing built into the catboost library there are slightly
different steps to be done
"""
"""
train_data = Pool(
data=FeaturesData(
num_feature_data=np.array([[1, 4, 5, 6],
[4, 5, 6, 7],
[30, 40, 50, 60]],
dtype=np.float32),
cat_feature_data=np.array([[b"a", b"b"],
[b"a", b"b"],
[b"c", b"d"]],
dtype=object)
),
label=[1, 1, -1]
)
"""
new_df_w_labels = data_df.copy()
for idx, odds_string in data_df.ODDS.iteritems():
# skip data qual errors and abnormalities
if not isinstance(odds_string, str):
continue
divied_list = _preprocess_odds_string(odds_string)
for school_or_perc in divied_list:
if school_or_perc in SCHOOLS_REVERSED.keys():
school_idx = divied_list.index(school_or_perc)
# the percent is always the next index after the school
perc = divied_list[school_idx + 1]
# print "School: {};Odds: {}".format(school_or_perc,perc)
# use the standardized name
standard_school_name = SCHOOLS_REVERSED[school_or_perc]
# insert the specific name value for the correct row
new_df_w_labels.at[idx, standard_school_name] = _parse_str_nums(perc)
new_df_w_labels = _reduce_majors_dimensionality(new_df_w_labels)
# drop unused columns
data_after_drop = new_df_w_labels.drop(['ODDS', 'INTERNATIONAL', 'JOBTITLE'], axis=1, inplace=False)
# change categorical data into numeric
categorical_cols = ['UNIVERSITY', 'MAJOR', 'GENDER', 'RACE']
# a dataframe of ONLY the features
features_only_df = data_after_drop.drop(TARGET_LABELS, axis=1, inplace=False)
# determine the columns that are features by subtracting from labels
feature_cols = set(data_after_drop.columns) - set(TARGET_LABELS)
# a dataframe with ONLY labels
labels = data_after_drop.drop(feature_cols, axis=1, inplace=False)
multi_data_set_dict = {}
for school in labels.columns:
df_for_school = features_only_df.join(pd.DataFrame({school: labels[school]}))
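# The original file is truncated here; a likely continuation (an assumption) is
# to keep one labelled dataframe per school inside the loop:
#     multi_data_set_dict[school] = df_for_school
# and, after the loop, return the dictionary:
#     return multi_data_set_dict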
# coding: utf-8
import numpy as np
import netCDF4 as nc
import pandas as pd
from glob import glob
from datetime import datetime
from os import path
from j24 import home
arm_dir = path.join(home(), 'DATA', 'arm')
SOUNDING_DIR = path.join(arm_dir, 'sounding')
GROUND_DIR = path.join(arm_dir, 'ground')
MWR_DIR = path.join(arm_dir, 'MWR')
all_soundings_f = 'tmpsondewnpnM1.b1.20140121.125200..20140330.172000.custom.cdf'
sounding_f = 'tmpsondewnpnM1.b1.20140131.115000.cdf' # sample
all_soundings_path = path.join(SOUNDING_DIR, all_soundings_f)
sounding_path = path.join(SOUNDING_DIR, sounding_f)
SOUNDING_GLOB = path.join(SOUNDING_DIR, 'tmpsondewnpnM1.b1.20??????.??????.cdf')
GROUND_GLOB = path.join(GROUND_DIR, 'tmpmetM1.b1.20??????.??????.cdf')
MWR_GLOB = path.join(MWR_DIR, '*.cdf')
#s = nc.Dataset(all_soundings_path)
def time(ncdata, as_np=False):
"""time from ARM netCDF"""
t0 = ncdata.variables['base_time'][0]
if as_np:
return np.array(t0 + ncdata.variables['time_offset'][:]).astype('datetime64[s]')
return pd.to_datetime(t0 + ncdata.variables['time_offset'][:], unit='s')
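# Example usage (a sketch; assumes `sounding_path` points to an existing file):
#     ncdata = nc.Dataset(sounding_path)
#     t = time(ncdata)                 # pandas DatetimeIndex of sample times
#     t_np = time(ncdata, as_np=True)  # numpy datetime64[s] array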
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['<NAME>']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
        df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
        df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
        df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(pd.Series(['aaa']))
        # Explicitly do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
        # Explicitly ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
        expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
        expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
        pd.testing.assert_frame_equal(expected_df, sg._matches_list)
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
import FEsingle  # sibling module providing the FEsingle.* feature helpers used below
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
    def real_FE(self):
return 0
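# Example usage of an FE pipeline class (file names below are hypothetical;
# `create` expects the daily-bar CSV plus adjustment-factor, limit-price,
# money-flow and long-term CSVs, caches the engineered features next to the
# first input file, and returns the path of the generated CSV):
#   fe = FEg30eom0110network()
#   out_csv = fe.create('daily.csv', 'adj.csv', 'limit.csv', 'moneyflow.csv', 'longterm.csv')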
class FEg30eom0110network(FEbase):
    # This version switches to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag whether the stock is ST (special treatment) or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude STAR Market (Science and Technology Innovation Board) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
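        # Cross-sectional value features: rank market cap, P/B, non-float share
        # ratio and P/S within each trading day, lag them by one day to avoid
        # look-ahead, and bucket the ranks into coarse integer bins.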
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
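        # Price-position features: where the close sits relative to rolling
        # highs/lows and the high-low range over 8- and 25-day windows.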
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag limit-up (effectively halted) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actual high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
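        # Build the prediction target from the trend over the next 5 trading days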
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Remove stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
    # This version switches to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
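        # Net small-order and large-order money flow = buy amount minus sell amount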
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
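        # 20-day rolling percentile position of each money-flow series, lagged one
        # day to avoid look-ahead (rollingRankSciPyB is a rolling-rank helper
        # defined elsewhere in the original project).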
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
        # Flag whether the stock is ST (special treatment) or otherwise abnormal
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
        ## Exclude STAR Market (Science and Technology Innovation Board) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
        # Price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
        # Flag limit-up (effectively halted) days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
        ### Real price range (distinguishes actual high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
        # 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
        # Compute ranks for the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
        # Remove stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
        # Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
    # This version switches to a 3-day-ahead prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
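        # Aggregate each money-flow series over 5-, 12- and 25-day windows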
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
        df_all = pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
from typing import Union, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import roc_auc_score, roc_curve
def plot_bars(df, path, title=None):
sns.set(style="whitegrid", font_scale=1.5)
pl = df.plot(figsize=(10, 10), kind='bar', cmap='Accent', width=1)
if title:
pl.title.set_text(title)
pl.get_figure().savefig(path, bbox_inches='tight')
plt.close()
def plot_roc_curve_image(y_true, y_pred, path):
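    """Plot a single ROC curve, annotate it with the GINI score, and save the figure to `path`."""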
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10))
fpr_reg, tpr_reg, _ = roc_curve(y_true, y_pred)
auc_score_reg = roc_auc_score(y_true, y_score=y_pred)
lw = 2
plt.plot(fpr_reg, tpr_reg, color='darkorange',
             lw=lw, label='WhiteBox model (GINI = {:.3f})'.format(2 * auc_score_reg - 1))
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random Model')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2)
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45)
plt.yticks(np.arange(0, 1.01, 0.05))
plt.grid(color='gray', linestyle='-', linewidth=1)
    plt.title('ROC curve (GINI = {:.3f})'.format(2 * auc_score_reg - 1))
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
def plot_double_roc_curve(train_y_true, train_y_pred, test_y_true, test_y_pred, path):
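    """Plot train and test ROC curves on one figure and save it to `path`."""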
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10))
train_fpr_reg, train_tpr_reg, _ = roc_curve(train_y_true, train_y_pred)
train_auc_score_reg = roc_auc_score(train_y_true, y_score=train_y_pred)
test_fpr_reg, test_tpr_reg, _ = roc_curve(test_y_true, test_y_pred)
test_auc_score_reg = roc_auc_score(test_y_true, y_score=test_y_pred)
lw = 2
plt.plot(
train_fpr_reg,
train_tpr_reg,
color='darkorange',
lw=lw,
        label='Train data (GINI = {:.3f})'.format(2 * train_auc_score_reg - 1)
)
plt.plot(
test_fpr_reg,
test_tpr_reg,
color='blue',
lw=lw,
        label='Test data (GINI = {:.3f})'.format(2 * test_auc_score_reg - 1)
)
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random Model')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45)
plt.yticks(np.arange(0, 1.01, 0.05))
plt.grid(color='gray', linestyle='-', linewidth=1)
    plt.title('ROC curve')
plt.savefig(path, bbox_inches='tight')
plt.close()
def plot_roc_curve_feature_image(feature_name, y_true, y_pred, path):
sns.set(style="whitegrid", font_scale=1.5)
plt.figure(figsize=(10, 10))
fpr_reg, tpr_reg, _ = roc_curve(y_true, y_pred)
auc_score_reg = roc_auc_score(y_true, y_score=y_pred)
lw = 2
plt.plot(fpr_reg, tpr_reg, color='darkorange',
lw=lw, label=feature_name + ' (GINI = {:.3f})'.format(2 * auc_score_reg - 1))
plt.plot([0, 1], [0, 1], color='red', lw=lw, linestyle='--', label='Random Model')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2)
plt.xticks(np.arange(0, 1.01, 0.05), rotation=45)
plt.yticks(np.arange(0, 1.01, 0.05))
plt.grid(color='gray', linestyle='-', linewidth=1)
plt.title('ROC curve(GINI = {:.3f})'.format(2 * auc_score_reg - 1) + f" of feature {feature_name}")
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
def plot_model_weights(features, path):
sns.set(style="whitegrid", font_scale=1.5)
fig = plt.figure(figsize=(20, 5))
ax = fig.add_axes([0, 0, 1, 1])
ax.bar(features.index, features.values, color='g')
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2)
plt.title('Model coefs', fontsize=28)
plt.xlabel('Features', fontsize=20)
plt.ylabel('Coef values', fontsize=20)
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15)
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
def plot_feature_split(feature_name, features, path):
sns.set(style="whitegrid", font_scale=1.5)
fig = plt.figure(figsize=(15, 5))
ax = fig.add_axes([0, 0, 1, 1])
ax.bar(features.index, features.values, color='g')
lgd = plt.legend(bbox_to_anchor=(0.5, -0.15), loc='upper center', ncol=2)
plt.title("Split of feature " + feature_name + " and woe values")
plt.xlabel('Bins', fontsize=20)
plt.ylabel('WoE values', fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
def plot_ginis(data_enc, target, path):
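    """Plot a horizontal bar chart of per-feature GINI scores (computed from the
    WoE-encoded features against the target) and save it to `path`."""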
sns.set(style="whitegrid", font_scale=1.5)
feats = list(data_enc.columns)
aucs = [roc_auc_score(target, -data_enc[col].values) for col in feats]
ginis = [(x - 0.5) * 2 for x in aucs]
ginis = pd.Series(ginis, index=feats).sort_values(ascending=True)
pl = ginis.plot(kind='barh', figsize=(10, 10))
pl.get_figure().savefig(path, bbox_inches='tight')
plt.close()
def plot_woe_bars(train_enc, train_target, test_enc, test_target, target_name, column, path):
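    """Compare bin frequencies and default rates of a WoE-encoded feature between
    the train and test samples."""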
sns.set(style="whitegrid", font_scale=1.5)
names = ['train', 'test']
samples = []
for df, target in zip([train_enc, test_enc], [train_target, test_target]):
df_copy = df.copy().round(3)
df_copy[target_name] = target
samples.append(df_copy)
samples = [x[[target_name, column]].groupby(column)[target_name].agg(['mean', 'count']).reset_index()
for x in samples]
for df in samples:
df['count'] /= df['count'].sum()
df.rename({'count': 'Freq', 'mean': 'DefaultRate',
column: 'WOE: ' + column}, inplace=True, axis=1)
    total = pd.concat(samples, axis=0, ignore_index=True)
from re import X
from dash import Dash, dcc, html, Input, Output
import dash_bootstrap_components as dbc
import pandas as pd
import altair as alt
import os
alt.data_transformers.disable_max_rows()
# import data
# absolute path to this file
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# absolute path to this file's root directory
PARENT_DIR = os.path.join(FILE_DIR, os.pardir)
dir_of_interest = os.path.join(PARENT_DIR, 'data')
raw_df = pd.read_csv(f"{dir_of_interest}/processed/medals.csv")
noc_list = pd.read_csv(f"{dir_of_interest}/processed/noc_list.csv")
# list of available olympics years for the year slider
# sets the label opacity to 0 to make invisible so you
# only see the tooltip for current year selection
years = list(raw_df['year'].unique())
years.sort()
slider_years = dict.fromkeys(map(int, years))
for year in years:
my_dict = {}
my_dict['label'] = str(year)
my_dict['style'] = {'opacity': 0}
slider_years[year] = my_dict
# list of the top 20 events
top20_events = (raw_df
.groupby("sport")
.count()['id']
.sort_values(ascending=False)
.head(20)
.index
.tolist())
# Creating the objects of the dashboard
season_checkbox = dcc.RadioItems(
id='season',
options=[
{'label': 'Summer', 'value': 'Summer'},
{'label': 'Winter', 'value': 'Winter'},
{'label': 'All', 'value': 'all'}],
value='all',
labelStyle={'display': 'block'})
medal_checklist = dcc.Checklist(
id='medal_type',
options=[
{'label': 'Gold', 'value': 'Gold'},
{'label': 'Silver', 'value': 'Silver'},
{'label': 'Bronze', 'value': 'Bronze'}],
value=['Gold', 'Silver', 'Bronze'],
labelStyle={'display': 'block'})
bubble_plot = html.Div([
html.Iframe(
id='scatter',
style={'border-width': '0', 'width': '140%', 'height': '420px'})
])
height_hist = html.Iframe(
id='height_hist',
style={'border-width': '0', 'width': '140%', 'height': '420px'})
year_slider = dcc.Slider(id='medals_by_country',
min=1896,
max=2016,
marks=slider_years,
step=None,
value=2000,
tooltip={"placement": "bottom", "always_visible": True},
included=False)
age_hist = html.Iframe(
id='age_hist',
style={'border-width': '0', 'width': '140%', 'height': '420px'})
age_slider = dcc.RangeSlider(id='age_slider',
min=0,
max=75,
step=1,
value=[0, 75],
marks=None,
tooltip={"placement": "bottom", "always_visible": True})
line_plot = html.Iframe(
id='line',
style={'border-width': '0', 'width': '140%', 'height': '420px'})
## Setup app and layout/frontend
#app = Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
app = Dash(__name__, external_stylesheets=[dbc.themes.MINTY])
server = app.server
app.title = "Olympic Dash"
app.layout = dbc.Container([
dbc.Toast(
[html.P(
"Here is the place for the historic olympic data and trends."
),
html.A(
"Data",
href="https://github.com/rfordatascience/tidytuesday",
style={"text-decoration": "underline", "color": "#074983"},
),
html.P(
"The dataset is from tidytuesday, which includes the data from 1896 up until 2016."
),
html.P(
"Please note that the Winter and Summer Games were held in the same year up until 1992. After that, they staggered them such that Winter Games occur on a four-year cycle starting with 1994, then Summer in 1996, then Winter in 1998, and so on."
),
html.A(
"GitHub Repo",
href="https://github.com/UBC-MDS/olympic-dash",
style={ "text-decoration": "underline", "color": "#074983"},
),
html.P(
"Please go to the GitHub repository for more information."
),],
id="toast",
header="About",
is_open=False,
dismissable=True,
style={
"position": "fixed",
"top": 75,
"right": 10,
"width": 400,
"z-index": "1"},
),
dbc.Row(
[
html.Div(
html.Img(src="assets/olympic_pic.png", height="80px"),
style ={"position" : "left",
"top": "20px",
"left": 0,
"width": 70, }
),
html.Div("Olympics Dashboard",
style={"font-size": "260%", "color":"#FFF","text-aligh":"right",
"padding": "0",
"white-space":"nowrap",
"position" : "left",
"top": 10,
"left": 90,
"width": 800,
},
),
dbc.Button(
"About",
id="toast-toggle",
color="#074983",
n_clicks=0,
style={
"white-space":"nowrap",
"top": 15,
"position" : "absolute",
"right":"20px",
                    'text-align':'center',
"width": 120,
"font-size": "120%"
}
)
],
id="header",
className="g-0",
style={
"background-image": "linear-gradient(to right, #074983, #ced2cc)",
"position": "absolute",
"width":"100%",
"left": 0,
"height":80,
}
),
html.Br(),
dbc.Row(
[
# dcc.Store stores the intermediate value
dcc.Store(id='filter_df'),
# filers
dbc.Col([
html.H3("Season"),
season_checkbox
], style={"position": "absolute", "left": 110, "top": 20}),
dbc.Col([
html.H3("Medal Type"),
medal_checklist,
], style={"position": "absolute", "left": 500, "top": 20}),
dbc.Col([
html.H3("Year"),
year_slider,
], style={"position": "absolute", "left": 900, "top": 20, "width": 500,}),
],
style={
"position": "absolute",
"top": 80,
"left": 0,
"width":"110%",
"height":160,
"background-color": "#E4EBF5",
},
),
html.Br(),
dbc.Row([
dbc.Col([
dbc.Card(
[
dbc.CardHeader(
html.Label("Total Medal Count",
style={"font-size":18, 'text_aligh': 'left', 'color': '#3F69A9', 'font-family': 'sans-serif'})),
html.Br(),
html.Br(),
dbc.Spinner(children = bubble_plot, color="primary")
],
style={"width": "39rem", "height": "36rem"},
),
dbc.Row([
html.Br()
]),
dbc.Card(
[
dbc.CardHeader(
html.Label("Athlete Height Distribution",
style={"font-size":18, 'text_aligh': 'left', 'color': '#3F69A9', 'font-family': 'sans-serif'})),
html.Br(),
html.Br(),
dbc.Spinner(children = height_hist, color="success")
],
style={"width": "39rem", "height": "33rem"},
)
],
style={
"position": "absolute",
"left": 100,
},
),
dbc.Col([
dbc.Card(
[
dbc.CardHeader(
html.Label("Olympic Medals Earned by Age Group",
style={"font-size":18, 'text_aligh': 'left', 'color': '#3F69A9', 'font-family': 'sans-serif'})
),
html.Br(),
dbc.Spinner(children = age_hist, color="warning"),
html.Div(
[
html.H6("Age Slider", style={'text-align': "center"}),
age_slider
],
)
],
style={"width": "39rem", "height": "36rem"}
),
dbc.Row([
html.Br()
]),
dbc.Card(
[
dbc.CardHeader(
html.Label("Medals Earned Over Time",
style={"font-size":18, 'text_aligh': 'left', 'color': '#3F69A9', 'font-family': 'sans-serif'})),
html.Br(),
html.Br(),
dbc.Spinner(children = line_plot, color="danger"),
],
style={"width": "39rem", "height": "33rem"},
)
],
style={
"position": "absolute",
"left": 770,
},
),
],
style={
"position": "absolute",
"top": 240,
"left": 0,
"width":"110%",
"height":1200,
"background-color": "#E4EBF5",
},
),
])
# dcc.Store stores the intermediate value
dcc.Store(id='filter_df')
# Set up callbacks/backend
@app.callback(
Output('filter_df', 'data'),
Input('season', 'value'),
Input('medal_type', 'value'))
def data_preprocess(season, medal_type):
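    """Filter the raw medal data by season and medal type and return it as JSON for dcc.Store."""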
temp_df = raw_df
filter = pd.DataFrame()
if season != 'all':
temp_df = temp_df[temp_df['season'] == season]
if len(medal_type) > 0:
for medal in medal_type:
temp = temp_df[temp_df['medal'] == medal]
filter = pd.concat([filter, temp])
return filter.to_json()
else:
return temp_df.to_json()
@app.callback(
Output('scatter', 'srcDoc'),
Input('filter_df', 'data'),
Input('medals_by_country', 'value'))
def plot_altair(filter_df, medals_by_country):
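    """Bubble chart of athlete count vs. average medals per athlete by country for the selected year."""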
temp = pd.read_json(filter_df)
year = int(medals_by_country)
temp = temp[temp['year'] == year]
athlete_df = pd.read_csv(f"{dir_of_interest}/processed/athlete_count.csv")
athlete_df = athlete_df[athlete_df['year'] == year]
df = pd.DataFrame()
df = athlete_df.loc[:,['noc', 'athletes']].reset_index(drop = True)
df = df.join(temp.groupby(['noc'])['medal'].count(), on = 'noc')
df['ave_medals'] = df['medal'] / df['athletes']
df = df.dropna()
df = df.join(noc_list.reset_index()).reset_index()
chart = alt.Chart(df).mark_circle().encode(
x = alt.X('athletes', title = 'Athletes'),
y = alt.Y('ave_medals', title = 'Ave. Medals per Athlete'),
size = alt.Size('medal', legend=alt.Legend(
orient = 'right',
title='Total Medal Count'
)),
color = alt.Color('continent', legend = alt.Legend(
orient = 'right',
title = 'IOC Region')
),
tooltip='country'
).interactive()
return chart.to_html()
@app.callback(
Output('height_hist', 'srcDoc'),
Input('filter_df', 'data'),
Input('medals_by_country', 'value'),
Input('medal_type', 'value'))
def plot_altair(filter_df, medals_by_country, medal_type):
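    """Histogram of athlete heights for the selected year, with a dropdown to filter by one of the top-20 sports."""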
temp = pd.read_json(filter_df)
year = int(medals_by_country)
temp = temp[temp['year'] == year]
if type(medal_type) != list:
temp = temp[temp['medal'] == medal_type]
event_dropdown = alt.binding_select(options=top20_events)
event_select = alt.selection_single(fields=['sport'], bind=event_dropdown, name='Olympic')
chart = alt.Chart(temp).mark_bar().encode(
x=alt.X('height', bin=alt.Bin(maxbins=20), title="Athlete Height"),
y=alt.Y('count()',title="Count"),
tooltip = ['count()']
).add_selection(
event_select
).transform_filter(
event_select
)
# .properties(title="Athlete Height Distribution"
# )
return chart.to_html()
@app.callback(
Output('age_hist', 'srcDoc'),
Input('filter_df', 'data'),
Input('age_slider', 'value'),
Input('medals_by_country', 'value'),
Input('medal_type', 'value'))
def plot_altair(filter_df, age_slider, medals_by_country, medal_type):
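    """Stacked step-area histogram of medals earned by athlete age, filtered by the age slider, year and medal type."""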
temp = pd.read_json(filter_df)
minage = int(age_slider[0])
maxage = int(age_slider[1])
year = int(medals_by_country)
temp = temp[temp['year'] == year]
temp = temp[temp['age'].between(minage, maxage)]
temp["order"] = temp["medal"].replace({ 'Bronze' : 1, 'Silver' : 2, 'Gold' : 3 })
if type(medal_type) != list:
temp = temp[temp['medal'] == medal_type]
chart = alt.Chart(temp).mark_area(
interpolate='step'
).encode(
x=alt.X('age:Q', bin=alt.Bin(maxbins=100), title = "Athlete age range"),
y=alt.Y('count()', title = "Medals Earned"),
tooltip = ['count()'],
color=alt.Color('medal:N',
sort=["Gold", "Silver", "Bronze"],
scale=alt.Scale(
domain=['Bronze', 'Silver', 'Gold'],
range=['#CD7F32', '#C0C0C0', '#FFD700']),
legend=alt.Legend(orient='right')),
order=alt.Order('order', sort='ascending')
)
# .properties(
# title='Olympic medals earned by age group')
return chart.to_html()
@app.callback(
Output('line', 'srcDoc'),
Input('filter_df', 'data'))
def plot_altair(filter_df):
    line_chart_df = pd.read_json(filter_df)
import pytest
import os
import sys
import json
from random import randint
from mercury_ml.common.artifact_storage.local import store_dict_json, store_pandas_json, store_pandas_pickle, \
store_h2o_frame
import shutil
input_dict = {"hello": [randint(0,100),randint(0,100),randint(0,100),randint(0,100)]}
dir = "./results"
if os.path.isdir(dir):
shutil.rmtree(dir)
os.makedirs(dir)
@pytest.mark.parametrize("input_dict, directory, filename, force, parts",
[(input_dict, dir, "test_h2o", False, 1),
(input_dict, dir, "test_h2o_dir", False, 2)])
def test_store_h2o_frame(input_dict, directory, filename, force, parts):
import pandas as pd
import h2o
h2o.init()
    data = h2o.H2OFrame(pd.DataFrame(input_dict))
#! /usr/bin/env python3.5
from __future__ import print_function
import argparse
import csv
import pandas
import random
import numpy
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout
from collections import Counter, OrderedDict
# Label regularization loss, according to Keras API
# Actually, our y_true is 1D, containing prior probabilities for
# our labels. But Keras API wants it to be a 2D array of shape
# (batch_size, num_classes)
# So, we expand it to 2D when calling train_on_batch (see below) and
# just take a mean
# in this function
def label_reg_loss(y_true, y_pred):
# KL-div
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
y_true_mean = K.mean(y_true, axis=0)
y_pred_mean = K.mean(y_pred, axis=0)
return K.sum(y_true_mean * K.log(y_true_mean / y_pred_mean), axis=-1)
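# Minimal usage sketch (assumed API, not part of the original file): the model is
# compiled with this loss and each batch is trained against the class priors
# tiled to 2D, matching the comment above, e.g.
#   model.compile(optimizer='adam', loss=label_reg_loss)
#   model.train_on_batch(x_batch, numpy.tile(priors, (len(x_batch), 1)))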
if __name__ == '__main__':
random.seed(42)
parser = argparse.ArgumentParser(description="Train a DNN")
parser.add_argument("--save-model", default=None)
parser.add_argument("--min-spk-occ", default=5, type=int,
help="Keep speaker names that occur at least in that many shows")
parser.add_argument("--num-epochs", type=int, default=20,
help="Number of epochs to train")
parser.add_argument("--num-dev-shows", default=10, type=int,
help="Number of dev shows for validation")
parser.add_argument("--confidence-threshold", type=float, default=0.7,
help="Posterior probability threshold for confident predictions")
parser.add_argument("spk_file",
help="File speaker data (IDs and i-vectors) in CSV format")
parser.add_argument("meta_file", help="Metadata file CSV format")
args = parser.parse_args()
    metadata_df = pandas.read_csv(args.meta_file, sep=";", encoding='utf-8-sig')
# coding: utf-8
import numpy as np
from itertools import product
from collections import Counter
import pandas as pd
import os
import re
import json
import openslide
from matplotlib import pyplot as plt
import cv2
#cell#
from extract_rois_svs_xml import extract_rois_svs_xml
from slideutils import (plot_contour, get_median_color, get_thumbnail_magnification,
CropRotateRoi, get_img_bbox, get_rotated_highres_roi,
get_uniform_tiles,
get_contour_centre, read_roi_patches_from_slide,
clip_roi_wi_bbox, sample_points_wi_contour,
remove_outlier_vertices)
#cell#
# ## Read XML ROI, convert, and save as JSON
#cell#
fnxml = "examples/6371/6371 1.xml"
fnsvs = re.sub(".xml$", ".svs", fnxml)
#cell#
fnjson = extract_rois_svs_xml(fnxml)
#cell#
with open(fnjson,'r') as fh:
roilist = json.load(fh)
#cell#
| pd.Series([roi["name"] for roi in roilist]) | pandas.Series |
"""Tests for system parameter identification functions."""
import pytest
import pandas as pd
import numpy as np
import pvlib
from pvlib import location, pvsystem, tracking, modelchain, irradiance
from pvanalytics import system
from .conftest import requires_pvlib
@pytest.fixture(scope='module')
def summer_times():
"""One hour time stamps from May 1 through September 30, 2020 in GMT+7"""
return pd.date_range(
start='2020-5-1',
end='2020-10-1',
freq='H',
closed='left',
tz='Etc/GMT+7'
)
@pytest.fixture(scope='module')
def summer_clearsky(summer_times, albuquerque):
"""Clearsky irradiance for `sumer_times` in Albuquerque, NM."""
return albuquerque.get_clearsky(summer_times, model='simplified_solis')
@pytest.fixture
def summer_ghi(summer_clearsky):
"""Clearsky GHI for Summer, 2020 in Albuquerque, NM."""
return summer_clearsky['ghi']
@pytest.fixture
def summer_power_fixed(summer_clearsky, albuquerque, array_parameters,
system_parameters):
"""Simulated power from a FIXED PVSystem in Albuquerque, NM."""
pv_system = pvsystem.PVSystem(surface_azimuth=180,
surface_tilt=albuquerque.latitude,
**array_parameters, **system_parameters)
mc = modelchain.ModelChain(
pv_system,
albuquerque,
)
mc.run_model(summer_clearsky)
try:
ac = mc.results.ac
except AttributeError:
ac = mc.ac # pvlib < 0.9.0
return ac
@pytest.fixture
def summer_power_tracking_old_pvlib(summer_clearsky, albuquerque,
array_parameters, system_parameters):
"""Simulated power for a TRACKING PVSystem in Albuquerque"""
# copy of `summer_power_tracking` but with older pvlib API
# TODO: remove when minimum pvlib version is >= 0.9.0
pv_system = tracking.SingleAxisTracker(**array_parameters,
**system_parameters)
mc = modelchain.ModelChain(
pv_system,
albuquerque
)
mc.run_model(summer_clearsky)
return mc.ac
@pytest.fixture
def summer_power_tracking(summer_clearsky, albuquerque, array_parameters,
system_parameters):
"""Simulated power for a TRACKING PVSystem in Albuquerque"""
array = pvsystem.Array(pvsystem.SingleAxisTrackerMount(),
**array_parameters)
system = pvsystem.PVSystem(arrays=[array],
**system_parameters)
mc = modelchain.ModelChain(
system,
albuquerque
)
mc.run_model(summer_clearsky)
return mc.results.ac
def test_ghi_tracking_envelope_fixed(summer_ghi):
"""Clearsky GHI for a system that is FIXED."""
assert system.is_tracking_envelope(
summer_ghi,
summer_ghi > 0,
pd.Series(False, index=summer_ghi.index)
) is system.Tracker.FIXED
def test_power_tracking_envelope_fixed(summer_power_fixed):
"""Simulated system under clearsky condidtions is FIXED."""
assert system.is_tracking_envelope(
summer_power_fixed,
summer_power_fixed > 0,
pd.Series(False, index=summer_power_fixed.index)
) is system.Tracker.FIXED
@requires_pvlib('<0.9.0', reason="SingleAxisTracker deprecation")
def test_power_tracking_envelope_tracking_old_pvlib(
summer_power_tracking_old_pvlib):
"""Simulated single axis tracker is identified as TRACKING."""
# copy of `test_power_tracking_envelope_tracking` but with older pvlib API
# TODO: remove when minimum pvlib version is >= 0.9.0
assert system.is_tracking_envelope(
summer_power_tracking_old_pvlib,
summer_power_tracking_old_pvlib > 0,
pd.Series(False, index=summer_power_tracking_old_pvlib.index)
) is system.Tracker.TRACKING
@requires_pvlib('>=0.9.0', reason="Array class")
def test_power_tracking_envelope_tracking(summer_power_tracking):
"""Simulated single axis tracker is identified as TRACKING."""
assert system.is_tracking_envelope(
summer_power_tracking,
summer_power_tracking > 0,
pd.Series(False, index=summer_power_tracking.index)
) is system.Tracker.TRACKING
def test_high_clipping_unknown_tracking_envelope(summer_power_fixed):
"""If the amount of clipping is high then tracking is UNKNOWN"""
clipping = pd.Series(False, index=summer_power_fixed.index)
# 50% clipping
clipping.iloc[0:len(clipping) // 2] = True
assert system.is_tracking_envelope(
summer_power_fixed,
summer_power_fixed > 0,
clipping,
clip_max=0.4
) is system.Tracker.UNKNOWN
@pytest.mark.filterwarnings("ignore:invalid value encountered in",
"ignore:divide by zero encountered in")
def test_constant_unknown_tracking_envelope(summer_ghi):
"""A constant signal has unknown tracking."""
constant = pd.Series(1, index=summer_ghi.index)
assert system.is_tracking_envelope(
constant,
pd.Series(True, index=summer_ghi.index),
pd.Series(False, index=summer_ghi.index),
) is system.Tracker.UNKNOWN
@requires_pvlib('<0.9.0', reason="SingleAxisTracker deprecation")
@pytest.mark.filterwarnings("ignore:invalid value encountered in",
"ignore:divide by zero encountered in")
def test_median_mismatch_tracking_old_pvlib(summer_power_tracking_old_pvlib):
"""If the median does not have the same fit as the 99.5% quantile then
tracking is UNKNOWN."""
# copy of `test_median_mismatch_tracking` but with older pvlib API
# TODO: remove when minimum pvlib version is >= 0.9.0
power_half_tracking = summer_power_tracking_old_pvlib.copy()
power_half_tracking.iloc[0:100*24] = 1
assert system.is_tracking_envelope(
power_half_tracking,
pd.Series(True, index=power_half_tracking.index),
pd.Series(False, index=power_half_tracking.index),
fit_median=False
) is system.Tracker.TRACKING
assert system.is_tracking_envelope(
power_half_tracking,
pd.Series(True, index=power_half_tracking.index),
pd.Series(False, index=power_half_tracking.index)
) is system.Tracker.UNKNOWN
@requires_pvlib('>=0.9.0', reason="Array class")
@pytest.mark.filterwarnings("ignore:invalid value encountered in",
"ignore:divide by zero encountered in")
def test_median_mismatch_tracking(summer_power_tracking):
"""If the median does not have the same fit as the 99.5% quantile then
tracking is UNKNOWN."""
power_half_tracking = summer_power_tracking.copy()
power_half_tracking.iloc[0:100*24] = 1
assert system.is_tracking_envelope(
power_half_tracking,
pd.Series(True, index=power_half_tracking.index),
pd.Series(False, index=power_half_tracking.index),
fit_median=False
) is system.Tracker.TRACKING
assert system.is_tracking_envelope(
power_half_tracking,
pd.Series(True, index=power_half_tracking.index),
        pd.Series(False, index=power_half_tracking.index)
    ) is system.Tracker.UNKNOWN
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
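# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the original test module):
# concrete test classes typically mix ParserTests in and bind read_csv /
# read_table to a specific engine, roughly as below. The class name avoids
# the "Test" prefix on purpose so pytest does not collect this sketch.
class CParserTestsSketch(ParserTests):
    engine = 'c'

    def read_csv(self, *args, **kwds):
        kwds = kwds.copy()
        kwds['engine'] = self.engine
        return pd.read_csv(*args, **kwds)

    def read_table(self, *args, **kwds):
        kwds = kwds.copy()
        kwds['engine'] = self.engine
        return pd.read_table(*args, **kwds)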
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
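# ---------------------------------------------------------------------------
# Assumed continuation (a sketch, not in the original excerpt): after one-hot
# encoding, the source categorical columns and fields that are not used for
# modelling are typically dropped. The exact drop list below is an assumption
# based on the hour.csv schema.
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)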
#!/usr/bin/env python
"""
DataExplore plugin for multivariate analysis
Created June 2017
Copyright (C) <NAME>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, division, print_function
import sys,os
import subprocess
import numpy as np
from pandastable.plugin import Plugin
from pandastable import core, plotting, dialogs
try:
from tkinter import *
from tkinter.ttk import *
except:
from Tkinter import *
from ttk import *
import pandas as pd
import pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from collections import OrderedDict
class MultivariatePlugin(Plugin):
"""Plugin for DataExplore"""
capabilities = [] #['gui','uses_sidepane']
requires = ['']
menuentry = 'Multivariate Analysis'
gui_methods = {}
version = '0.1'
def __init__(self):
self.result = None
return
def main(self, parent):
if parent==None:
return
self.parent = parent
self._doFrame()
grps = {'data':['class_labels','target_col','use_selected'],
'options':['analysis','transform','3d_plot'] }
self.groups = grps = OrderedDict(grps)
kinds = ['']
methods = ['PCA','LDA','MDS','logistic_regression']#,'feature selection']
transforms = ['','log']
sheets = self.parent.getSheetList()
self.opts = {'class_labels': {'type':'combobox','default':'','items':sheets},
'target_col': {'type':'combobox','default':'','items':[]},
'analysis': {'type':'combobox','default':'PCA','items':methods},
'use_selected': {'type':'checkbutton','default':False,'label':'use selected data'},
'transform': {'type':'combobox','default':'','items':transforms},
'3d_plot': {'type':'checkbutton','default':False,'label':'3d plot'},
}
fr = self._createWidgets(self.mainwin)
fr.pack(side=LEFT,fill=BOTH)
bf = Frame(self.mainwin, padding=2)
bf.pack(side=LEFT,fill=BOTH)
b = Button(bf, text="Run", command=self.run)
b.pack(side=TOP,fill=X,pady=2)
b = Button(bf, text="View Results", command=self.showResults)
b.pack(side=TOP,fill=X,pady=2)
bf = Frame(self.mainwin, padding=2)
bf.pack(side=LEFT,fill=BOTH)
b = Button(bf, text="Refresh", command=self.update)
b.pack(side=TOP,fill=X,pady=2)
b = Button(bf, text="Close", command=self.quit)
b.pack(side=TOP,fill=X,pady=2)
b = Button(bf, text="Help", command=self.online_help)
b.pack(side=TOP,fill=X,pady=2)
self.update()
sheet = self.parent.getCurrentSheet()
#reference to parent frame in sheet
pw = self.parent.sheetframes[sheet]
self.pf = self.table.pf
return
def applyOptions(self):
"""Set the options"""
kwds = {}
for i in self.opts:
if self.opts[i]['type'] == 'listbox':
items = self.widgets[i].curselection()
kwds[i] = [self.widgets[i].get(j) for j in items]
print (items, kwds[i])
else:
kwds[i] = self.tkvars[i].get()
self.kwds = kwds
return
def _createWidgets(self, parent, callback=None):
"""Auto create tk vars, widgets for corresponding options and
and return the frame"""
dialog, self.tkvars, self.widgets = plotting.dialogFromOptions(parent,
self.opts, self.groups)
#self.widgets['class_labels'].bind("<<ComboboxSelected>>", self.update)
return dialog
def update(self, evt=None):
"""Update data widget(s)"""
self.table = self.parent.getCurrentTable()
df = self.table.model.df
cols = list(df.columns)
cols += ['']  # include an empty choice so the selection can be left blank
#self.widgets['sample_labels']['values'] = self.parent.getSheetList()
self.widgets['class_labels']['values'] = cols
self.widgets['target_col']['values'] = cols
return
def run(self):
"""Run chosen method"""
import sklearn
method = self.tkvars['analysis'].get()
sel = self.tkvars['use_selected'].get()
cats = self.tkvars['class_labels'].get()
target = self.tkvars['target_col'].get()
plot3d = self.tkvars['3d_plot'].get()
transform = self.tkvars['transform'].get()
if sel == 1:
data = self.table.getSelectedDataFrame()
else:
data = self.table.model.df
#setup plot
self.pf._initFigure()
if plot3d == True:
fig = self.pf.fig
ax = self.pf.ax = ax = Axes3D(fig)
else:
ax = self.pf.ax
self.pf.mplopts.applyOptions()
self.pf.labelopts.applyOptions()
opts = self.pf.mplopts.kwds
lopts = self.pf.labelopts.kwds
#print (opts)
X = pre_process(data, transform=transform)
result = None
if cats != '':
X = X.set_index(cats)
print (X)
if method == 'PCA':
pX, result = do_pca(X=X)
plot_matrix(pX, ax=ax, plot3d=plot3d, **opts)
elif method == 'LDA':
pX, result = do_lda(X=X)
plot_matrix(pX, ax=ax, plot3d=plot3d, **opts)
elif method == 'MDS':
pX, result = do_mds(X=X)
plot_matrix(pX, ax=ax, plot3d=plot3d, **opts)
elif method == 'feature selection':
pX = feature_selection(X)#, y=y)
elif method == 'logistic_regression':
pX = logistic_regression(X, ax, **opts)
self.result_obj = result
self.result_mat = pX
self.pf.ax.set_title(lopts['title'])
self.pf.canvas.draw()
return
def showResults(self):
import sklearn
df = self.result_mat
result = self.result_obj
if df is None:
return
w = self.resultswin = Toplevel(width=600,height=800)
w.title('results')
fr=Frame(w)
fr.pack(fill=BOTH,expand=1)
if type(result) is sklearn.decomposition.pca.PCA:
print (result.components_)
elif type(result) is sklearn.discriminant_analysis.LinearDiscriminantAnalysis:
print (result)
t = core.Table(fr, dataframe=df, showtoolbar=True)
t.show()
return
def clustermap(self):
data = self.data
res = res = self.getFiltered()
cluster_map(data, res.name)
plt.show()
return
def quit(self, evt=None):
"""Override this to handle pane closing"""
self.mainwin.destroy()
return
def online_help(self,event=None):
"""Open the online documentation"""
import webbrowser
link='https://github.com/dmnfarrell/pandastable/wiki'
webbrowser.open(link,autoraise=1)
return
def pre_process(X, transform='log'):
X = X._get_numeric_data()
if transform == 'log':
X = X+1
X = np.log(X)
#print (X)
X = X.fillna(0)
return X
def do_pca(X, c=3):
"""Do PCA"""
from sklearn import preprocessing
from sklearn.decomposition.pca import PCA, RandomizedPCA
#do PCA
#S = standardize_data(X)
#remove non numeric
X = X._get_numeric_data()
S = pd.DataFrame(preprocessing.scale(X),columns = X.columns)
pca = PCA(n_components=c)
pca.fit(S)
out = 'explained variance %s' %pca.explained_variance_ratio_
print (out)
#print pca.components_
w = pd.DataFrame(pca.components_,columns=S.columns)
print (w.T.max(1).sort_values())
pX = pca.fit_transform(S)
pX = pd.DataFrame(pX,index=X.index)
return pX, pca
def plot_matrix(pX, plot3d=False, palette='Spectral', labels=False, ax=None,
colors=None, **kwargs):
"""Plot PCA result, input should be a dataframe"""
if ax==None:
fig,ax = plt.subplots(1,1,figsize=(6,6))
#print (kwargs)
colormap = kwargs['colormap']
fs = kwargs['fontsize']
ms = kwargs['ms']*12
kwargs = {k:kwargs[k] for k in ('linewidth','alpha')}
cats = pX.index.unique()
import seaborn as sns
colors = sns.mpl_palette(colormap, len(cats))
for c, i in zip(colors, cats):
print (i, len(pX.ix[i]))
if plot3d == True:
ax.scatter(pX.ix[i, 0], pX.ix[i, 1], pX.ix[i, 2], color=c, s=ms, label=i,
edgecolor='black', **kwargs)
else:
ax.scatter(pX.ix[i, 0], pX.ix[i, 1], color=c, s=ms, label=i,
edgecolor='black', **kwargs)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
if labels == True:
for i, point in pX.iterrows():
ax.text(point[0]+.3, point[1]+.3, str(i),fontsize=(9))
if len(cats)<20:
ax.legend(fontsize=fs*.8)
return
def do_lda(X, c=3):
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
idx = X.index
cla = pd.Categorical(idx)
y = cla.codes
X = X._get_numeric_data()
lda = LinearDiscriminantAnalysis(n_components=c)
pX = lda.fit(X, y).transform(X)
pX = pd.DataFrame(pX,index=idx)
return pX, lda
def do_mds(X, c=3):
"""Do MDS"""
X = X._get_numeric_data()
from sklearn import manifold
seed = np.random.RandomState(seed=3)
mds = manifold.MDS(n_components=c, max_iter=500, eps=1e-9, random_state=seed,
n_jobs=1)
pX = mds.fit(X.values).embedding_
pX = pd.DataFrame(pX,index=X.index)
return pX, mds
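# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original plugin):
# how pre_process, do_pca and plot_matrix might be combined outside the GUI.
# The toy DataFrame is invented purely for demonstration; the call relies on
# the module's (older) pandas/scikit-learn APIs used above.
def _demo_pca_plot():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame(rng.rand(12, 4),
                       columns=['g1', 'g2', 'g3', 'g4'],
                       index=['case'] * 6 + ['control'] * 6)
    X = pre_process(toy, transform='log')
    pX, pca = do_pca(X, c=3)
    fig, ax = plt.subplots(figsize=(6, 6))
    plot_matrix(pX, ax=ax, colormap='Spectral', fontsize=10, ms=5,
                linewidth=0.5, alpha=0.8)
    return pX, pca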
def feature_selection(X, y=None):
"""feature selection"""
if y is None:
idx = X.index
cla = pd.Categorical(idx)
import numpy as np
import scipy as sp
import pandas as pd
import scipy.stats
import scanpy
import csv
import glob
import random
from sklearn import preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
# <- todo
predefined_col_order = "./data/col_index"
string_gene_list_pwd = "./data/genesort_string_hit.txt"
# ->
def sample_test_split(geo, num_of_class_test, num_of_example, num_of_testing, string_set, sorting, label_dic=False, pp=False):
class_folders = geo.keys()
class_folders = random.sample(class_folders, num_of_class_test)
if label_dic:
labels_to_text = label_dic
labels_converter = {value:key for key, value in label_dic.items()}
else:
labels_converter = np.array(range(len(class_folders)))
labels_converter = dict(zip(class_folders, labels_converter))
labels_to_text = {value:key for key, value in labels_converter.items()}
example_set = pd.DataFrame()
test_set = pd.DataFrame()
example_label = []
test_label = []
# balance sampler
for subtype in labels_converter.keys():
this_exp = geo[subtype]
if (len(this_exp.index.intersection(string_set)) == 0):
this_exp = this_exp.transpose()
assert(len(this_exp.index.intersection(string_set)) != 0), "exp array has not symbol"
this_exp = this_exp[~this_exp.index.duplicated(keep='first')]
total_colno = (this_exp.shape)[1]
col_nu = list(range(total_colno) )
random.shuffle(col_nu)
assert(len(col_nu) >= num_of_example+num_of_testing), [total_colno, num_of_example+num_of_testing, subtype]
example_ids = col_nu[0 : num_of_example]
ex = this_exp.iloc[:,example_ids]
test_ids = col_nu[num_of_example : num_of_example + num_of_testing]
te = this_exp.iloc[:,test_ids]
#ex = np.log(ex+1.0)
#ex = np.clip(ex, 1, np.max(ex)[1])
#ex = ex.transpose()
#te = np.log(te+1.0)
#te = np.clip(te, 1, np.max(te)[1])
#te = te.transpose()
example_set = pd.concat([example_set,ex],axis=1)
test_set = pd.concat([test_set, te],axis=1)
example_label += [labels_converter[subtype]] * num_of_example
test_label += [labels_converter[subtype]] * num_of_testing
if string_set is not None:
example_set = example_set.transpose()
example_set = example_set.filter(items=string_set)
example_set = example_set.transpose()
test_set = test_set.transpose()
test_set = test_set.filter(items=string_set)
test_set = test_set.transpose()
out_ex = pd.DataFrame(index=string_set)
out_ex = pd.concat([out_ex, example_set],axis=1)
out_ex = out_ex.replace(np.nan,0)
test_set = test_set.transpose()
test_set['label'] = test_label
test_set = test_set.sample(frac=1)
test_label = test_set['label']
test_set = test_set.drop(columns='label')
test_set = test_set.transpose()
out_te = pd.DataFrame(index=string_set)
out_te = pd.concat([out_te, test_set], axis=1)
# fetch_california_housing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the California housing dataset
california = fetch_california_housing()
# A few pandas display/formatting settings
pd.set_option('precision', 4)  # display precision
pd.set_option('max_columns', 9)  # max columns to show (8 features plus the added median house value column, california.target)
pd.set_option('display.width', None)  # let pandas choose the display width
# Create the DataFrame
california_df = pd.DataFrame(california.data, columns=california.feature_names)
# Add a column for the median house values stored in california.target
california_df['MedHouseValue'] = pd.Series(california.target)
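# ---------------------------------------------------------------------------
# Assumed continuation (a sketch, not part of the original snippet): one way
# the imported train_test_split and LinearRegression might be applied to the
# dataset loaded above. Variable names are illustrative assumptions.
X_train, X_test, y_train, y_test = train_test_split(
    california.data, california.target, random_state=11)
linear_regression = LinearRegression()
linear_regression.fit(X=X_train, y=y_train)
predicted = linear_regression.predict(X_test)
print('R^2 on the held-out test set: {:.4f}'.format(
    linear_regression.score(X_test, y_test)))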
import string
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.api.types import (
is_extension_array_dtype,
pandas_dtype,
)
from .pandas_vb_common import (
datetime_dtypes,
extension_dtypes,
numeric_dtypes,
string_dtypes,
)
_numpy_dtypes = [
np.dtype(dtype) for dtype in (numeric_dtypes + datetime_dtypes + string_dtypes)
]
_dtypes = _numpy_dtypes + extension_dtypes
class Dtypes:
params = _dtypes + list(map(lambda dt: dt.name, _dtypes))
param_names = ["dtype"]
def time_pandas_dtype(self, dtype):
pandas_dtype(dtype)
class DtypesInvalid:
param_names = ["dtype"]
params = ["scalar-string", "scalar-int", "list-string", "array-string"]
data_dict = {
"scalar-string": "foo",
"scalar-int": 1,
"list-string": ["foo"] * 1000,
"array-string": np.array(["foo"] * 1000),
}
def time_pandas_dtype_invalid(self, dtype):
try:
pandas_dtype(self.data_dict[dtype])
except TypeError:
pass
class SelectDtypes:
params = [
tm.ALL_INT_NUMPY_DTYPES
+ tm.ALL_INT_EA_DTYPES
+ tm.FLOAT_NUMPY_DTYPES
+ tm.COMPLEX_DTYPES
+ tm.DATETIME64_DTYPES
+ tm.TIMEDELTA64_DTYPES
+ tm.BOOL_DTYPES
]
param_names = ["dtype"]
def setup(self, dtype):
N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
def create_df(data):
return DataFrame(data, index=self.index, columns=self.columns)
import itertools
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from skillmodels.params_index import get_params_index
from skillmodels.parse_params import create_parsing_info
from skillmodels.parse_params import parse_params
from skillmodels.process_data import process_data
from skillmodels.process_debug_data import create_state_ranges
from skillmodels.process_model import process_model
def visualize_transition_equations(
model_dict,
params,
states,
period,
state_ranges=None,
quantiles_of_other_factors=(0.25, 0.5, 0.75),
plot_marginal_effects=False,
n_points=50,
n_draws=50,
sharex=False,
sharey="row",
data=None,
):
"""Visualize transition equations.
Args:
model_dict (dict): The model specification. See: :ref:`model_specs`
params (pandas.DataFrame): DataFrame with model parameters.
states (pandas.DataFrame): Tidy DataFrame with filtered or simulated states.
They are used to estimate the state ranges in each period (if state_ranges
are not given explicitly) and to estimate the distribution of the factors
that are not visualized.
period (int): The start period of the transition equations that are plotted.
combine_plots_in_grid (boolean): Return a figure containing subplots for each
pair of factors or a dictionary of individual plots. Default True.
state_ranges (dict): The keys are the names of the latent factors.
The values are DataFrames with the columns "period", "minimum", "maximum".
The state_ranges are used to define the axis limits of the plots.
quantiles_of_other_factors (float, list or None): Quantiles at which the factors
that are not varied in a given plot are fixed. If None, those factors are
not fixed but integrated out.
n_points (int): Number of grid points per input. Default 50.
n_draws (int): Number of randomly drawn values of the factors that are averaged
out. Only relevant if quantiles_of_other_factors is *None*. Default 50.
sharex (bool or {'none', 'all', 'col'}): Whether to share the properties of
x-axis across subplots. See API docs of matplotlib.pyplot.subplots.
Default False.
sharey (bool or {'none', 'all', 'row'}): Whether to share the properties of
y-axis across subplots. See API docs of matplotlib.pyplot.subplots.
Default 'row'.
data (pd.DataFrame): Empirical dataset that is used to estimate the model. Only
needed if the model has observed factors. Those factors are directly taken
from the data to calculate their quantiles or averages.
Returns:
matplotlib.Figure: The plot
pandas.DataFrame: The data from which the plot was generated.
"""
if isinstance(quantiles_of_other_factors, float):
quantiles_of_other_factors = [quantiles_of_other_factors]
elif isinstance(quantiles_of_other_factors, tuple):
quantiles_of_other_factors = list(quantiles_of_other_factors)
if plot_marginal_effects:
raise NotImplementedError()
model = process_model(model_dict)
if period >= model["labels"]["periods"][-1]:
raise ValueError(
"*period* must be the penultimate period of the model or earlier."
)
latent_factors = model["labels"]["latent_factors"]
observed_factors = model["labels"]["observed_factors"]
all_factors = model["labels"]["all_factors"]
if observed_factors and data is None:
raise ValueError(
"The model has observed factors. You must pass the empirical data to "
"'visualize_transition_equations' via the keyword *data*."
)
if observed_factors:
_, _, _observed_arr = process_data(
df=data,
labels=model["labels"],
update_info=model["update_info"],
anchoring_info=model["anchoring"],
)
# convert from jax to numpy
_observed_arr = np.array(_observed_arr)
observed_data = pd.DataFrame(
data=_observed_arr[period], columns=observed_factors
)
observed_data["id"] = observed_data.index
observed_data["period"] = period
states_data = pd.merge(
left=states,
right=observed_data,
left_on=["id", "period"],
right_on=["id", "period"],
how="left",
)
else:
states_data = states.copy()
params_index = get_params_index(
update_info=model["update_info"],
labels=model["labels"],
dimensions=model["dimensions"],
transition_info=model["transition_info"],
)
params = params.reindex(params_index)
parsing_info = create_parsing_info(
params_index=params.index,
update_info=model["update_info"],
labels=model["labels"],
anchoring=model["anchoring"],
)
_, _, _, pardict = parse_params(
params=jnp.array(params["value"].to_numpy()),
parsing_info=parsing_info,
dimensions=model["dimensions"],
labels=model["labels"],
n_obs=1,
)
if state_ranges is None:
state_ranges = create_state_ranges(states_data, all_factors)
figsize = (2.5 * len(all_factors), 2 * len(latent_factors))
fig, axes = plt.subplots(
nrows=len(latent_factors),
ncols=len(all_factors),
figsize=figsize,
sharex=sharex,
sharey=sharey,
)
for (output_factor, input_factor), ax in zip(
itertools.product(latent_factors, all_factors), axes.flatten()
):
transition_function = model["transition_info"]["individual_functions"][
output_factor
]
transition_params = {
output_factor: pardict["transition"][output_factor][period]
}
if quantiles_of_other_factors is not None:
plot_data = _prepare_data_for_one_plot_fixed_quantile_2d(
states_data=states_data,
state_ranges=state_ranges,
period=period,
input_factor=input_factor,
output_factor=output_factor,
quantiles_of_other_factors=quantiles_of_other_factors,
n_points=n_points,
transition_function=transition_function,
transition_params=transition_params,
all_factors=all_factors,
)
else:
plot_data = _prepare_data_for_one_plot_average_2d(
states_data=states_data,
state_ranges=state_ranges,
period=period,
input_factor=input_factor,
output_factor=output_factor,
n_points=n_points,
n_draws=n_draws,
transition_function=transition_function,
transition_params=transition_params,
all_factors=all_factors,
)
if (
isinstance(quantiles_of_other_factors, list)
and len(quantiles_of_other_factors) > 1
):
hue = "quantile"
else:
hue = None
sns.lineplot(
data=plot_data,
x=f"{input_factor} in period {period}",
y=f"{output_factor} in period {period + 1}",
hue=hue,
ax=ax,
)
handles, labels = ax.get_legend_handles_labels()
if ax.get_legend() is not None:
ax.get_legend().remove()
if hue is not None:
fig.legend(
handles=handles,
labels=labels,
bbox_to_anchor=(0.5, -0.05),
loc="lower center",
ncol=len(quantiles_of_other_factors),
)
fig.tight_layout()
sns.despine()
return fig
def _prepare_data_for_one_plot_fixed_quantile_2d(
states_data,
state_ranges,
period,
input_factor,
output_factor,
quantiles_of_other_factors,
n_points,
transition_function,
transition_params,
all_factors,
):
period_data = states_data.query(f"period == {period}")[all_factors]
input_min = state_ranges[input_factor].loc[period]["minimum"]
input_max = state_ranges[input_factor].loc[period]["maximum"]
to_concat = []
for quantile in quantiles_of_other_factors:
input_data = pd.DataFrame()
input_data[input_factor] = np.linspace(input_min, input_max, n_points)
fixed_quantiles = period_data.drop(columns=input_factor).quantile(quantile)
input_data[fixed_quantiles.index] = fixed_quantiles
input_arr = jnp.array(input_data[all_factors].to_numpy())
# convert from jax to numpy array
output_arr = np.array(transition_function(transition_params, input_arr))
quantile_data = pd.DataFrame()
quantile_data[f"{input_factor} in period {period}"] = input_data[input_factor]
quantile_data[f"{output_factor} in period {period + 1}"] = np.array(output_arr)
quantile_data["quantile"] = quantile
to_concat.append(quantile_data)
out = pd.concat(to_concat)
return out
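# ---------------------------------------------------------------------------
# Illustrative call sketch (an assumption, not from the original module):
# `model_dict`, `params` and `filtered_states` stand in for a processed
# skillmodels specification, its estimated parameters and filtered states.
def _example_visualize_call(model_dict, params, filtered_states):
    fig = visualize_transition_equations(
        model_dict=model_dict,
        params=params,
        states=filtered_states,
        period=0,
        quantiles_of_other_factors=(0.25, 0.5, 0.75),
    )
    return fig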
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils_new import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml_with_src,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
asr_normalize,
)
from torch import Tensor
from torch.utils.data import Dataset
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "src_text", "tgt_text", "speaker"]
class CommonVoice(Dataset):
"""
Args:
root (str): root path to the dataset and generated manifests/features
source_language (str): source (audio) language
"""
SPLITS = ["train", "dev", "test"]
def __init__(
self,
root: str,
split: str,
source_language: str,
) -> None:
assert split in self.SPLITS
assert source_language is not None
self.root: Path = Path(root)
cv_tsv_path = self.root / f"{split}.tsv"
assert cv_tsv_path.is_file()
df = load_df_from_tsv(cv_tsv_path)
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "clips" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
def __getitem__(
self, n: int
) -> Tuple[Tensor, int, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, sentence, speaker_id, sample_id)``
"""
data = self.data[n]
path = self.root / "clips" / data["path"]
waveform, sample_rate = torchaudio.load(path)
sentence = data["sentence"]
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return waveform, sample_rate, sentence, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
# Extract features
feature_root = root / "fbank"
feature_root.mkdir(exist_ok=True)
for split in CommonVoice.SPLITS:
print(f"Fetching split {split}...")
dataset = CommonVoice(root, split, args.src_lang)
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy", args.n_mel_bins
)
# Pack features into ZIP
zip_path = root / "fbank.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
zip_manifest = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
train_text_src = []
task = f"asr_{args.src_lang}"
for split in CommonVoice.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = CommonVoice(root, split, args.src_lang)
for wav, sr, src_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(zip_manifest[utt_id])
duration_ms = int(wav.size(1) / sr * 1000)
manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
manifest["src_text"].append(asr_normalize(src_utt))
manifest["tgt_text"].append(asr_normalize(src_utt))
manifest["speaker"].append(speaker_id)
is_train_split = split.startswith("train")
if is_train_split:
train_text.extend(manifest["tgt_text"])
train_text_src.extend(manifest["src_text"])
df = pd.DataFrame.from_dict(manifest)
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['<NAME>']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
        test_series = pd.Series(['aaa'])
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
        test_series = pd.Series(['aaa'])
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
        test_series = pd.Series(['aaa'])
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
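    # For reference, the expected trigram lists above match plain character slicing
    # (an illustrative sketch only -- StringGrouper's n_grams method may apply extra
    # normalization beyond the case handling exercised by these tests):
    #
    #     s = 'McDonalds'
    #     [s[i:i + 3] for i in range(len(s) - 2)]
    #     # -> ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']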
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
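        # Identical strings ('foo'/'foo', 'bar'/'bar') give cosine similarity 1, while
        # 'baz' and 'bop' share no 3-character n-grams, so their tf-idf vectors are
        # orthogonal and the expected similarity is 0.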
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
| pd.testing.assert_frame_equal(expected_df, sg._matches_list) | pandas.testing.assert_frame_equal |
name = 'nfl_data_py'
import pandas
import numpy
import datetime
def import_pbp_data(years, columns=None, downcast=True):
"""Imports play-by-play data
Args:
years (List[int]): years to get PBP data for
columns (List[str]): only return these columns
downcast (bool): convert float64 to float32, default True
Returns:
DataFrame
"""
if not isinstance(years, (list, range)):
raise ValueError('Input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
plays = pandas.DataFrame()
url1 = r'https://github.com/nflverse/nflfastR-data/raw/master/data/play_by_play_'
url2 = r'.parquet'
for year in years:
try:
if len(columns) != 0:
data = pandas.read_parquet(url1 + str(year) + url2, columns=columns, engine='fastparquet')
else:
data = pandas.read_parquet(url1 + str(year) + url2, engine='fastparquet')
raw = pandas.DataFrame(data)
raw['season'] = year
if len(plays) == 0:
plays = raw
else:
                plays = pandas.concat([plays, raw])
print(str(year) + ' done.')
        except Exception:
print('Data not available for ' + str(year))
# converts float64 to float32, saves ~30% memory
if downcast:
cols = plays.select_dtypes(include=[numpy.float64]).columns
plays.loc[:, cols] = plays.loc[:, cols].astype(numpy.float32)
return plays
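# Example usage (an illustrative sketch only -- requires network access and the
# fastparquet engine used above; 'posteam' and 'epa' are assumed nflfastR column names):
#
#     pbp = import_pbp_data([2020, 2021], columns=['posteam', 'epa'])
#     pbp.groupby(['season', 'posteam'])['epa'].mean()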
def import_weekly_data(years, columns=None, downcast=True):
"""Imports weekly player data
Args:
        years (List[int]): years to get weekly data for
columns (List[str]): only return these columns
downcast (bool): convert float64 to float32, default True
Returns:
DataFrame
"""
if not isinstance(years, (list, range)):
raise ValueError('Input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/player_stats.parquet', engine='fastparquet')
data = data[data['season'].isin(years)]
if len(columns) > 0:
data = data[columns]
# converts float64 to float32, saves ~30% memory
if downcast:
cols = data.select_dtypes(include=[numpy.float64]).columns
data.loc[:, cols] = data.loc[:, cols].astype(numpy.float32)
return data
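# Example usage (an illustrative sketch only -- column names follow the player_stats
# fields referenced in import_seasonal_data below):
#
#     wk = import_weekly_data(range(2019, 2022),
#                             columns=['player_name', 'recent_team', 'fantasy_points_ppr'])
#     wk.sort_values('fantasy_points_ppr', ascending=False).head()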
def import_seasonal_data(years, s_type='REG'):
    """Imports player-level stats aggregated by season ('REG', 'POST', or 'ALL')"""
if not isinstance(years, (list, range)):
raise ValueError('years input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if s_type not in ('REG','ALL','POST'):
raise ValueError('Only REG, ALL, POST allowed for s_type.')
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/player_stats.parquet', engine='fastparquet')
if s_type == 'ALL':
data = data[data['season'].isin(years)]
else:
data = data[(data['season'].isin(years)) & (data['season_type'] == s_type)]
pgstats = data[['recent_team', 'season', 'week', 'attempts', 'completions', 'passing_yards', 'passing_tds',
'passing_air_yards', 'passing_yards_after_catch', 'passing_first_downs',
'fantasy_points_ppr']].groupby(
['recent_team', 'season', 'week']).sum().reset_index()
pgstats.columns = ['recent_team', 'season', 'week', 'atts', 'comps', 'p_yds', 'p_tds', 'p_ayds', 'p_yac', 'p_fds',
'ppr_pts']
all_stats = data[
['player_id', 'player_name', 'recent_team', 'season', 'week', 'carries', 'rushing_yards', 'rushing_tds',
'rushing_first_downs', 'rushing_2pt_conversions', 'receptions', 'targets', 'receiving_yards', 'receiving_tds',
'receiving_air_yards', 'receiving_yards_after_catch', 'receiving_first_downs', 'receiving_epa',
'fantasy_points_ppr']].merge(pgstats, how='left', on=['recent_team', 'season', 'week']).fillna(0)
season_stats = all_stats.drop(['recent_team', 'week'], axis=1).groupby(
['player_id', 'player_name', 'season']).sum().reset_index()
season_stats['tgt_sh'] = season_stats['targets'] / season_stats['atts']
season_stats['ay_sh'] = season_stats['receiving_air_yards'] / season_stats['p_ayds']
season_stats['yac_sh'] = season_stats['receiving_yards_after_catch'] / season_stats['p_yac']
season_stats['wopr'] = season_stats['tgt_sh'] * 1.5 + season_stats['ay_sh'] * 0.8
season_stats['ry_sh'] = season_stats['receiving_yards'] / season_stats['p_yds']
season_stats['rtd_sh'] = season_stats['receiving_tds'] / season_stats['p_tds']
season_stats['rfd_sh'] = season_stats['receiving_first_downs'] / season_stats['p_fds']
season_stats['rtdfd_sh'] = (season_stats['receiving_tds'] + season_stats['receiving_first_downs']) / (
season_stats['p_tds'] + season_stats['p_fds'])
season_stats['dom'] = (season_stats['ry_sh'] + season_stats['rtd_sh']) / 2
season_stats['w8dom'] = season_stats['ry_sh'] * 0.8 + season_stats['rtd_sh'] * 0.2
season_stats['yptmpa'] = season_stats['receiving_yards'] / season_stats['atts']
season_stats['ppr_sh'] = season_stats['fantasy_points_ppr'] / season_stats['ppr_pts']
data.drop(['recent_team', 'week'], axis=1, inplace=True)
szn = data.groupby(['player_id', 'player_name', 'season', 'season_type']).sum().reset_index().merge(
data[['player_id', 'season', 'season_type']].groupby(['player_id', 'season']).count().reset_index().rename(
columns={'season_type': 'games'}), how='left', on=['player_id', 'season'])
szn = szn.merge(season_stats[['player_id', 'season', 'tgt_sh', 'ay_sh', 'yac_sh', 'wopr', 'ry_sh', 'rtd_sh',
'rfd_sh', 'rtdfd_sh', 'dom', 'w8dom', 'yptmpa', 'ppr_sh']], how='left',
on=['player_id', 'season'])
return szn
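# Example usage (an illustrative sketch only): the derived share metrics computed
# above ('tgt_sh', 'wopr', 'dom', ...) are appended to each player-season row.
#
#     szn = import_seasonal_data([2020], s_type='REG')
#     szn[['player_name', 'targets', 'tgt_sh', 'wopr']].head()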
def see_pbp_cols():
    """Returns the column names available in the play-by-play data"""
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/play_by_play_2020.parquet', engine='fastparquet')
cols = data.columns
return cols
def see_weekly_cols():
    """Returns the column names available in the weekly player stats data"""
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/player_stats.parquet', engine='fastparquet')
cols = data.columns
return cols
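# Example (an illustrative sketch only):
#
#     'passing_yards' in see_weekly_cols()   # verify a column exists before requesting it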
def import_rosters(years, columns=None):
    """Imports roster data for the given years, optionally restricted to the given columns"""
if not isinstance(years, (list, range)):
raise ValueError('years input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
rosters = []
for y in years:
temp = pandas.read_csv(r'https://github.com/mrcaseb/nflfastR-roster/blob/master/data/seasons/roster_' + str(y)
+ '.csv?raw=True', low_memory=False)
rosters.append(temp)
rosters = pandas.DataFrame(pandas.concat(rosters)).rename(
columns={'full_name': 'player_name', 'gsis_id': 'player_id'})
rosters.drop_duplicates(subset=['season', 'player_name', 'position', 'player_id'], keep='first', inplace=True)
if len(columns) > 0:
rosters = rosters[columns]
def calc_age(x):
ca = pandas.to_datetime(x[0])
bd = pandas.to_datetime(x[1])
return ca.year - bd.year + numpy.where(ca.month > bd.month, 0, -1)
if 'birth_date' in columns and 'current_age' in columns:
rosters['current_age'] = rosters['season'].apply(lambda x: datetime.datetime(int(x), 9, 1))
rosters['age'] = rosters[['current_age', 'birth_date']].apply(calc_age, axis=1)
rosters.drop(['current_age'], axis=1, inplace=True)
rosters.dropna(subset=['player_id'], inplace=True)
return rosters
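# Example usage (an illustrative sketch only -- these columns exist after the renames above):
#
#     ros = import_rosters([2021], columns=['season', 'player_name', 'position', 'player_id'])
#     ros.groupby('position')['player_id'].count()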
def import_team_desc():
    """Imports team description data (names, colors, and logo URLs)"""
df = | pandas.read_csv(r'https://github.com/nflverse/nflfastR-data/raw/master/teams_colors_logos.csv') | pandas.read_csv |
import logging
from typing import Tuple
import pandas as pd
from pandas import DataFrame
from dbnd import task
from dbnd.testing.helpers_pytest import assert_run_task
from dbnd_test_scenarios.test_common.targets.target_test_base import TargetTestBase
logger = logging.getLogger(__name__)
@task(result=("features", "scores"))
def t_d_multiple_return(p: int) -> (DataFrame, int):
return | pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]) | pandas.DataFrame |